Column: ngram (list of strings); list lengths range from 0 to 67.8k. Each row holds the overlapping word-level n-grams extracted from one source file; the four rows in this excerpt correspond to the four files reconstructed below.
[ "Import the Operative System Library as operative_system import os as operative_system # Disable", "Print the Tensor for the constant \"b\" tensorflow.print(\"b = \", b) # Print", "TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import the Libraries", "\"b\" tensorflow.print(\"b = \", b) # Print the Tensor for the addition of", "dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and assign to it the value", "of the Execution:\\n\") # Print the Tensor for the constant \"a\" tensorflow.print(\"a =", "\"\"\" # Import the Libraries and Packages # Import the Operative System Library", "the Execution:\\n\") # Print the Tensor for the constant \"a\" tensorflow.print(\"a = \",", "constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total = a + b = \",", "1.1 - Introduction to TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\"", "Import the TensorFlow Library as tensorflow alias import tensorflow as tensorflow # Constants", "= '2' # Import the TensorFlow Library as tensorflow alias import tensorflow as", "total = tensorflow.add(a, b, name=\"total\") # If the Logging Flag is set to", "for the constant \"b\" tensorflow.print(\"b = \", b) # Print the Tensor for", "Logging Flag is set to True if LOGGING_FLAG: # Print the header for", "Lab 1.1 - Introduction to TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>)", "Create the constant \"b\" and assign to it the value 4 b =", "and # assign to it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\")", "System Library as operative_system import os as operative_system # Disable all the Debugging", "i.e., total = a + b total = tensorflow.add(a, b, name=\"total\") # If", "header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor for", "value 4 b = tensorflow.constant(4.0, name=\"b\") # Create the addition of the constants", "tensorflow.constant(4.0, name=\"b\") # Create the addition of the constants \"a\" and \"b\", as", "Import the Libraries and Packages # Import the Operative System Library as operative_system", "the addition of the constants \"a\" and \"b\", as \"total\", # i.e., total", "\"b\" and assign to it the value 4 b = tensorflow.constant(4.0, name=\"b\") #", "b = tensorflow.constant(4.0, name=\"b\") # Create the addition of the constants \"a\" and", "+ b total = tensorflow.add(a, b, name=\"total\") # If the Logging Flag is", "name=\"total\") # If the Logging Flag is set to True if LOGGING_FLAG: #", "import os as operative_system # Disable all the Debugging Logs from TensorFlow Library", "LOGGING_FLAG = True # Create the constant \"a\" as a float of 32", "= tensorflow.add(a, b, name=\"total\") # If the Logging Flag is set to True", "operative_system import os as operative_system # Disable all the Debugging Logs from TensorFlow", "operative_system # Disable all the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'", "if LOGGING_FLAG: # Print the header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\")", "If the Logging Flag is set to True if LOGGING_FLAG: # Print the", "Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import the Libraries and", "the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor for the constant", "= a + b total = tensorflow.add(a, b, name=\"total\") # If the Logging", "Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor for the constant \"a\"", "= 
tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and assign to it", "# Import the TensorFlow Library as tensorflow alias import tensorflow as tensorflow #", "TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library as tensorflow alias", "True # Create the constant \"a\" as a float of 32 bits and", "\"a\" as a float of 32 bits and # assign to it the", "constants \"a\" and \"b\", as \"total\", # i.e., total = a + b", "it the value 4 b = tensorflow.constant(4.0, name=\"b\") # Create the addition of", "the constants \"a\" and \"b\", as \"total\", # i.e., total = a +", "# Import the Operative System Library as operative_system import os as operative_system #", "of # the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total = a +", "tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor for the constant \"a\" tensorflow.print(\"a", "and Packages # Import the Operative System Library as operative_system import os as", "tensorflow # Constants LOGGING_FLAG = True # Create the constant \"a\" as a", "Packages # Import the Operative System Library as operative_system import os as operative_system", "tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and assign to it the", "b, name=\"total\") # If the Logging Flag is set to True if LOGGING_FLAG:", "# the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total = a + b", "name=\"a\") # Create the constant \"b\" and assign to it the value 4", "the Libraries and Packages # Import the Operative System Library as operative_system import", "alias import tensorflow as tensorflow # Constants LOGGING_FLAG = True # Create the", "3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and assign", "from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library as tensorflow", "Print the header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the", "# Print the Tensor for the constant \"a\" tensorflow.print(\"a = \", a) #", "the Operative System Library as operative_system import os as operative_system # Disable all", "a) # Print the Tensor for the constant \"b\" tensorflow.print(\"b = \", b)", "name=\"b\") # Create the addition of the constants \"a\" and \"b\", as \"total\",", "bits and # assign to it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32,", "# If the Logging Flag is set to True if LOGGING_FLAG: # Print", "Operative System Library as operative_system import os as operative_system # Disable all the", "(<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import the Libraries and Packages # Import", "= \", a) # Print the Tensor for the constant \"b\" tensorflow.print(\"b =", "Libraries and Packages # Import the Operative System Library as operative_system import os", "= \", b) # Print the Tensor for the addition of # the", "a float of 32 bits and # assign to it the value 3", "Create the constant \"a\" as a float of 32 bits and # assign", "the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\"", "= True # Create the constant \"a\" as a float of 32 bits", "b) # Print the Tensor for the addition of # the constants \"a\"", "\", a) # Print the Tensor for the constant \"b\" tensorflow.print(\"b = \",", "the constant \"a\" tensorflow.print(\"a = \", a) # Print the Tensor for the", "as operative_system # Disable all the 
Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] =", "as a float of 32 bits and # assign to it the value", "constant \"a\" as a float of 32 bits and # assign to it", "assign to it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create", "TensorFlow Library as tensorflow alias import tensorflow as tensorflow # Constants LOGGING_FLAG =", "the value 4 b = tensorflow.constant(4.0, name=\"b\") # Create the addition of the", "# Print the Tensor for the addition of # the constants \"a\" and", "# Create the constant \"b\" and assign to it the value 4 b", "Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library as", "b total = tensorflow.add(a, b, name=\"total\") # If the Logging Flag is set", "as tensorflow alias import tensorflow as tensorflow # Constants LOGGING_FLAG = True #", "# Create the constant \"a\" as a float of 32 bits and #", "Flag is set to True if LOGGING_FLAG: # Print the header for the", "= tensorflow.constant(4.0, name=\"b\") # Create the addition of the constants \"a\" and \"b\",", "Tensor for the constant \"a\" tensorflow.print(\"a = \", a) # Print the Tensor", "Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library", "\"b\", as \"total\", # i.e., total = a + b total = tensorflow.add(a,", "Constants LOGGING_FLAG = True # Create the constant \"a\" as a float of", "the Tensor for the constant \"a\" tensorflow.print(\"a = \", a) # Print the", "and assign to it the value 4 b = tensorflow.constant(4.0, name=\"b\") # Create", "# Create the addition of the constants \"a\" and \"b\", as \"total\", #", "# Disable all the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #", "a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and assign to", "# i.e., total = a + b total = tensorflow.add(a, b, name=\"total\") #", "the header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor", "the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow", "Library as operative_system import os as operative_system # Disable all the Debugging Logs", "# Import the Libraries and Packages # Import the Operative System Library as", "as \"total\", # i.e., total = a + b total = tensorflow.add(a, b,", "\"a\" and \"b\", as \"total\", # i.e., total = a + b total", "tensorflow.print(\"b = \", b) # Print the Tensor for the addition of #", "to it the value 4 b = tensorflow.constant(4.0, name=\"b\") # Create the addition", "value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant \"b\" and", "as tensorflow # Constants LOGGING_FLAG = True # Create the constant \"a\" as", "Print the Tensor for the constant \"a\" tensorflow.print(\"a = \", a) # Print", "tensorflow.print(\"a = \", a) # Print the Tensor for the constant \"b\" tensorflow.print(\"b", "32 bits and # assign to it the value 3 a = tensorflow.constant(3.0,", "\"a\" and \"b\", as \"total\" tensorflow.print(\"total = a + b = \", total)", "the constant \"a\" as a float of 32 bits and # assign to", "the addition of # the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total =", "total = a + b total = tensorflow.add(a, b, name=\"total\") # If the", "tensorflow.add(a, b, name=\"total\") # If the Logging Flag is set to True if", "to 
TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import the", "to it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the", "- <NAME> (<EMAIL>) \"\"\" # Import the Libraries and Packages # Import the", "assign to it the value 4 b = tensorflow.constant(4.0, name=\"b\") # Create the", "operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library as tensorflow alias import tensorflow", "the constant \"b\" and assign to it the value 4 b = tensorflow.constant(4.0,", "'2' # Import the TensorFlow Library as tensorflow alias import tensorflow as tensorflow", "for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print the Tensor for the", "# Print the header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") # Print", "import tensorflow as tensorflow # Constants LOGGING_FLAG = True # Create the constant", "<reponame>rubenandrebarreiro/fct-nova-deep-learning-labs \"\"\" Lab 1.1 - Introduction to TensorFlow Author: - <NAME> (<EMAIL>) -", "- <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import the Libraries and Packages", "- Introduction to TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" #", "Introduction to TensorFlow Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) \"\"\" # Import", "the Tensor for the constant \"b\" tensorflow.print(\"b = \", b) # Print the", "is set to True if LOGGING_FLAG: # Print the header for the Logging", "Library as tensorflow alias import tensorflow as tensorflow # Constants LOGGING_FLAG = True", "# Print the Tensor for the constant \"b\" tensorflow.print(\"b = \", b) #", "to True if LOGGING_FLAG: # Print the header for the Logging tensorflow.print(\"\\n\\nLogging of", "a + b total = tensorflow.add(a, b, name=\"total\") # If the Logging Flag", "the Logging Flag is set to True if LOGGING_FLAG: # Print the header", "# assign to it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") #", "Disable all the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import", "Tensor for the addition of # the constants \"a\" and \"b\", as \"total\"", "constant \"b\" tensorflow.print(\"b = \", b) # Print the Tensor for the addition", "for the constant \"a\" tensorflow.print(\"a = \", a) # Print the Tensor for", "float of 32 bits and # assign to it the value 3 a", "as operative_system import os as operative_system # Disable all the Debugging Logs from", "constant \"a\" tensorflow.print(\"a = \", a) # Print the Tensor for the constant", "for the addition of # the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total", "# Constants LOGGING_FLAG = True # Create the constant \"a\" as a float", "all the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the", "of the constants \"a\" and \"b\", as \"total\", # i.e., total = a", "tensorflow as tensorflow # Constants LOGGING_FLAG = True # Create the constant \"a\"", "and \"b\", as \"total\", # i.e., total = a + b total =", "\"total\", # i.e., total = a + b total = tensorflow.add(a, b, name=\"total\")", "set to True if LOGGING_FLAG: # Print the header for the Logging tensorflow.print(\"\\n\\nLogging", "it the value 3 a = tensorflow.constant(3.0, dtype=tensorflow.float32, name=\"a\") # Create the constant", "the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total = a + b =", "Create the addition of the constants \"a\" and \"b\", as \"total\", # i.e.,", "<NAME> (<EMAIL>) - 
<NAME> (<EMAIL>) \"\"\" # Import the Libraries and Packages #", "the Tensor for the addition of # the constants \"a\" and \"b\", as", "the constant \"b\" tensorflow.print(\"b = \", b) # Print the Tensor for the", "Execution:\\n\") # Print the Tensor for the constant \"a\" tensorflow.print(\"a = \", a)", "Tensor for the constant \"b\" tensorflow.print(\"b = \", b) # Print the Tensor", "tensorflow alias import tensorflow as tensorflow # Constants LOGGING_FLAG = True # Create", "\"\"\" Lab 1.1 - Introduction to TensorFlow Author: - <NAME> (<EMAIL>) - <NAME>", "Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Import the TensorFlow Library as tensorflow alias import", "addition of # the constants \"a\" and \"b\", as \"total\" tensorflow.print(\"total = a", "os as operative_system # Disable all the Debugging Logs from TensorFlow Library operative_system.environ['TF_CPP_MIN_LOG_LEVEL']", "of 32 bits and # assign to it the value 3 a =", "addition of the constants \"a\" and \"b\", as \"total\", # i.e., total =", "4 b = tensorflow.constant(4.0, name=\"b\") # Create the addition of the constants \"a\"", "the TensorFlow Library as tensorflow alias import tensorflow as tensorflow # Constants LOGGING_FLAG", "(<EMAIL>) \"\"\" # Import the Libraries and Packages # Import the Operative System", "\", b) # Print the Tensor for the addition of # the constants", "<NAME> (<EMAIL>) \"\"\" # Import the Libraries and Packages # Import the Operative", "constant \"b\" and assign to it the value 4 b = tensorflow.constant(4.0, name=\"b\")", "True if LOGGING_FLAG: # Print the header for the Logging tensorflow.print(\"\\n\\nLogging of the", "LOGGING_FLAG: # Print the header for the Logging tensorflow.print(\"\\n\\nLogging of the Execution:\\n\") #", "Print the Tensor for the addition of # the constants \"a\" and \"b\",", "\"a\" tensorflow.print(\"a = \", a) # Print the Tensor for the constant \"b\"" ]
[ "requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\" ) with patch_client, patch_weather_alert: yield", "utils.\"\"\" from unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services", "from unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that", "test utils.\"\"\" from unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out", "\"\"\"Meteo-France generic test utils.\"\"\" from unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests():", "import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client =", "makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\" ) with patch_client, patch_weather_alert:", "generic test utils.\"\"\" from unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub", "out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\" )", "import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\"", "@pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert", "unittest.mock import patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes", "pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\")", "def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert =", "\"\"\"Stub out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\"", "services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\" ) with", "that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch( \"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy\" ) with patch_client,", "patch import pytest @pytest.fixture(autouse=True) def patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client", "patch_requests(): \"\"\"Stub out services that makes requests.\"\"\" patch_client = patch(\"homeassistant.components.meteo_france.meteofranceClient\") patch_weather_alert = patch(" ]
[ "= pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark finally:", "pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark", "\\ if config is not None: for key, value in config.items(): pre_spark =", "None: for key, value in config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate()", "value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark finally: logger.info(\"Stopping Spark", "\\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None:", "os from contextlib import contextmanager from pyspark.sql import SparkSession from .log import logger", "not None: for key, value in config.items(): pre_spark = pre_spark.config(key, value) spark =", "@contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\")", "f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None: for key, value in config.items():", "pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config", "def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\", "= SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is", ".log import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST',", ".master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None: for key, value", "import SparkSession from .log import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\", "import os from contextlib import contextmanager from pyspark.sql import SparkSession from .log import", "key, value in config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark", "pyspark.sql import SparkSession from .log import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder", "from .log import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\", "is not None: for key, value in config.items(): pre_spark = pre_spark.config(key, value) spark", "for key, value in config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created", "in config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try:", "if config is not None: for key, value in config.items(): pre_spark = pre_spark.config(key,", "from contextlib import contextmanager from pyspark.sql import 
SparkSession from .log import logger @contextmanager", "from pyspark.sql import SparkSession from .log import logger @contextmanager def spark_session(config=None): pre_spark =", "spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if", "spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark finally: logger.info(\"Stopping Spark Session\")", "import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\"", "contextmanager from pyspark.sql import SparkSession from .log import logger @contextmanager def spark_session(config=None): pre_spark", "import contextmanager from pyspark.sql import SparkSession from .log import logger @contextmanager def spark_session(config=None):", "SparkSession from .log import logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml')", "config is not None: for key, value in config.items(): pre_spark = pre_spark.config(key, value)", "pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark finally: logger.info(\"Stopping", "= pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield spark finally: logger.info(\"Stopping Spark Session\") spark.stop()", "'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None: for key, value in", "'7077')}\") \\ if config is not None: for key, value in config.items(): pre_spark", "config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\") try: yield", "\\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None: for key,", "SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not", "logger @contextmanager def spark_session(config=None): pre_spark = SparkSession.builder \\ .appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT',", "contextlib import contextmanager from pyspark.sql import SparkSession from .log import logger @contextmanager def", ".appName('science-papers-ml') \\ .master(f\"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:\" f\"{os.environ.get('SPARK_MASTER_PORT', '7077')}\") \\ if config is not None: for", "value in config.items(): pre_spark = pre_spark.config(key, value) spark = pre_spark.getOrCreate() logger.info(\"Created Spark session\")" ]
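A hedged usage sketch for this context manager. The enclosing module name (spark_utils here) and the config keys are assumptions; the master URL resolves from SPARK_MASTER_HOST and SPARK_MASTER_PORT, defaulting to spark-master:7077:

# Hypothetical caller; assumes the module above is importable as spark_utils
# and that a Spark master is reachable at the configured URL.
from spark_utils import spark_session

config = {
    "spark.executor.memory": "2g",          # assumed tuning values
    "spark.sql.shuffle.partitions": "64",
}

with spark_session(config) as spark:
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "label"])
    print(df.count())  # 2
# Leaving the block triggers the finally clause: a log line, then spark.stop().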
[ "result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0)", "0 distance months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if", "'#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585',", "in monthTable: if f[0] == ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0)", "if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] =", "'#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F',", "all Data for one owl # fill all month with distance # missing", "} def getOwl(monthTable, ID): result = [] for f in monthTable: if f[0]", "months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month ==", "if month == \"02\": curOwl[1] = i[3] if month == \"03\": curOwl[2] =", "== \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if", "[], 'Mar': [], 'Apr': [], 'May': [], 'Jun': [], 'Jul': [], 'Aug': [],", "'#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA',", "def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in monthTable: tempOwl = feature[0]", "\"11\": curOwl[10] = i[3] if month == \"12\": curOwl[11] = i[3] col =", "if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] =", "[], 'Nov': [], 'Dec': [] } def getOwl(monthTable, ID): result = [] for", "'#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C',", "result = [] for f in monthTable: if f[0] == ID: result.append(f) return", "'#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar': [], 'Apr':", "[], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec': [] } def getOwl(monthTable, ID):", "'#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF',", "cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color = col) else: plt.bar(X, curOwl,", "'#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4',", "return months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in monthTable: tempOwl", "month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist", "'#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899',", "if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] =", "if month == \"06\": curOwl[5] = i[3] if month == \"07\": curOwl[6] =", "\"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = 
dist if month", "numpy as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4',", "curOwl, color = col) else: plt.bar(X, curOwl, color = col, bottom = lastOwl)", "dist return months months = fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,]", "[], 'Apr': [], 'May': [], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [],", "'#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF',", "\"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month", "[] for f in monthTable: if f[0] == ID: result.append(f) return result def", "== \"01\": curOwl[0] = i[3] if month == \"02\": curOwl[1] = i[3] if", "for f in monthTable: if f[0] == ID: result.append(f) return result def fillNull(months):", "= dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\":", "curOwl[0] = i[3] if month == \"02\": curOwl[1] = i[3] if month ==", "'#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE',", "'#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3',", "'#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD',", "month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist", "= dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\":", "'#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA',", "months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month ==", "\"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month", "'#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3',", "'#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520',", "'#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3',", "get all Data for one owl # fill all month with distance #", "'#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0',", "in monthTable: tempOwl = feature[0] month = feature[2] dist = feature[3] owl =", "month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist", "getOwl(monthTable, feature[0]) for i in t: month = i[2] if month == \"01\":", "plt import numpy as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF',", "'Sep': [], 'Oct': [], 'Nov': [], 'Dec': [] } def getOwl(monthTable, ID): result", "month == \"06\": curOwl[5] = i[3] if month == \"07\": curOwl[6] = i[3]", "'#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F',", 
"'#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [], 'May': [],", "owl = getOwl(monthTable, \"1751\") # get all Data for one owl # fill", "'#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080',", "\"08\": curOwl[7] = i[3] if month == \"09\": curOwl[8] = i[3] if month", "= i[3] if month == \"12\": curOwl[11] = i[3] col = cnames[counter] if", "plt.bar(X, curOwl, color = col) else: plt.bar(X, curOwl, color = col, bottom =", "curOwl[3] = i[3] if month == \"05\": curOwl[4] = i[3] if month ==", "counter = 0 tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable: owl =", "'#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar': [],", "col) else: plt.bar(X, curOwl, color = col, bottom = lastOwl) lastOwl = curOwl", "'#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months =", "'Mar': [], 'Apr': [], 'May': [], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep':", "month == \"10\": curOwl[9] = i[3] if month == \"11\": curOwl[10] = i[3]", "months = fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0", "'#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5',", "i[3] if month == \"07\": curOwl[6] = i[3] if month == \"08\": curOwl[7]", "i[3] col = cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color = col)", "'#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B',", "0 tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable: owl = feature[0] if", "'#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA',", "'#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57',", "'#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C',", "= i[3] if month == \"07\": curOwl[6] = i[3] if month == \"08\":", "= feature[3] owl = getOwl(monthTable, \"1751\") # get all Data for one owl", "'#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0',", "fill all month with distance # missing data = 0 distance months =", "months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month ==", "if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable, months)", "= i[3] if month == \"06\": curOwl[5] = i[3] if month == \"07\":", "'#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400',", "month with distance # missing data = 0 distance months = fillNull(months) if", "i[3] if month == \"03\": curOwl[2] = i[3] if month == \"04\": curOwl[3]", "all month with distance # missing data = 0 distance months = fillNull(months)", "'#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6',", "months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == 
\"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month ==", "'#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B',", "'#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE',", "= i[3] if month == \"10\": curOwl[9] = i[3] if month == \"11\":", "fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in monthTable: tempOwl = feature[0] month", "i[3] if month == \"02\": curOwl[1] = i[3] if month == \"03\": curOwl[2]", "dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1]", "== \"04\": curOwl[3] = i[3] if month == \"05\": curOwl[4] = i[3] if", "t = getOwl(monthTable, feature[0]) for i in t: month = i[2] if month", "'#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [], 'May':", "feature[0] if owl != tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0]) for", "dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1]", "= [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable:", "for i in t: month = i[2] if month == \"01\": curOwl[0] =", "monthTable: owl = feature[0] if owl != tempOwl: tempOwl = owl t =", "'#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50',", "'#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23',", "feature in monthTable: owl = feature[0] if owl != tempOwl: tempOwl = owl", "f in monthTable: if f[0] == ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0)", "if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] =", "'#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA',", "[], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec': [] } def", "[], 'May': [], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [],", "'#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan':", "= i[3] col = cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color =", "'#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F',", "# missing data = 0 distance months = fillNull(months) if month == \"01\":", "== ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0)", "def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0)", "i[2] if month == \"01\": curOwl[0] = i[3] if month == \"02\": curOwl[1]", "i[3] if month == \"04\": curOwl[3] = i[3] if month == \"05\": curOwl[4]", "'#4682B4', '#D2B48C', '#008080', 
'#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months", "'#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887',", "'#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090',", "'#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2',", "feature in monthTable: tempOwl = feature[0] month = feature[2] dist = feature[3] owl", "= 0 distance months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist", "\"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month", "= np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\" for", "month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist", "feature[0] month = feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\") # get", "if month == \"11\": curOwl[10] = i[3] if month == \"12\": curOwl[11] =", "if month == \"05\": curOwl[4] = i[3] if month == \"06\": curOwl[5] =", "\"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month", "monthTable: tempOwl = feature[0] month = feature[2] dist = feature[3] owl = getOwl(monthTable,", "distance months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month", "i[3] if month == \"06\": curOwl[5] = i[3] if month == \"07\": curOwl[6]", "== \"11\": curOwl[10] = i[3] if month == \"12\": curOwl[11] = i[3] col", "'#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B',", "'#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6',", "'#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD',", "'#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE',", "months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl =", "'#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0',", "curOwl[5] = i[3] if month == \"07\": curOwl[6] = i[3] if month ==", "getOwl(monthTable, \"1751\") # get all Data for one owl # fill all month", "== \"08\": curOwl[7] = i[3] if month == \"09\": curOwl[8] = i[3] if", "owl t = getOwl(monthTable, feature[0]) for i in t: month = i[2] if", "'Feb': [], 'Mar': [], 'Apr': [], 'May': [], 'Jun': [], 'Jul': [], 'Aug':", "'#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32',", "'#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF',", "lastOwl == \"none\": plt.bar(X, curOwl, 
color = col) else: plt.bar(X, curOwl, color =", "'#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A',", "= feature[0] if owl != tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0])", "'#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1',", "= i[2] if month == \"01\": curOwl[0] = i[3] if month == \"02\":", "month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist", "if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] =", "i[3] if month == \"10\": curOwl[9] = i[3] if month == \"11\": curOwl[10]", "owl != tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0]) for i in", "if f[0] == ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0)", "month = feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\") # get all", "months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl", "one owl # fill all month with distance # missing data = 0", "'#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000',", "if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] =", "month == \"03\": curOwl[2] = i[3] if month == \"04\": curOwl[3] = i[3]", "'#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347',", "cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF',", "'#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA',", "[], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [],", "for feature in monthTable: tempOwl = feature[0] month = feature[2] dist = feature[3]", "'#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD',", "months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month ==", "months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable, months) X = np.arange(12) curOwl", "curOwl[2] = i[3] if month == \"04\": curOwl[3] = i[3] if month ==", "\"none\": plt.bar(X, curOwl, color = col) else: plt.bar(X, curOwl, color = col, bottom", "'#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500',", "else: plt.bar(X, curOwl, color = col, bottom = lastOwl) lastOwl = curOwl counter", "month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist", "with distance # missing data = 0 distance months = fillNull(months) if month", "months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) 
months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months):", "X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\"", "'#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90',", "return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0)", "'#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4',", "\"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month", "'#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000',", "curOwl[6] = i[3] if month == \"08\": curOwl[7] = i[3] if month ==", "'#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093',", "'#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED',", "= {'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [], 'May': [], 'Jun': [],", "# fill all month with distance # missing data = 0 distance months", "i[3] if month == \"05\": curOwl[4] = i[3] if month == \"06\": curOwl[5]", "months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in", "owl # fill all month with distance # missing data = 0 distance", "def getOwl(monthTable, ID): result = [] for f in monthTable: if f[0] ==", "if month == \"04\": curOwl[3] = i[3] if month == \"05\": curOwl[4] =", "= i[3] if month == \"04\": curOwl[3] = i[3] if month == \"05\":", "== \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if", "== \"12\": curOwl[11] = i[3] col = cnames[counter] if lastOwl == \"none\": plt.bar(X,", "\"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month", "[], 'Dec': [] } def getOwl(monthTable, ID): result = [] for f in", "= dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\":", "months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return", "= getOwl(monthTable, \"1751\") # get all Data for one owl # fill all", "feature[0]) for i in t: month = i[2] if month == \"01\": curOwl[0]", "== \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if", "months months = fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter =", "\"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if 
month", "== \"05\": curOwl[4] = i[3] if month == \"06\": curOwl[5] = i[3] if", "month == \"05\": curOwl[4] = i[3] if month == \"06\": curOwl[5] = i[3]", "'#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1',", "import numpy as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC',", "'#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5',", "if month == \"12\": curOwl[11] = i[3] col = cnames[counter] if lastOwl ==", "fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1]", "month == \"08\": curOwl[7] = i[3] if month == \"09\": curOwl[8] = i[3]", "'#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF',", "== \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return", "'#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080',", "'#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000',", "\"01\": curOwl[0] = i[3] if month == \"02\": curOwl[1] = i[3] if month", "'#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969',", "'#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF',", "'#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D',", "curOwl[4] = i[3] if month == \"06\": curOwl[5] = i[3] if month ==", "'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec':", "dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1]", "curOwl[11] = i[3] col = cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color", "== \"02\": curOwl[1] = i[3] if month == \"03\": curOwl[2] = i[3] if", "= col) else: plt.bar(X, curOwl, color = col, bottom = lastOwl) lastOwl =", "Data for one owl # fill all month with distance # missing data", "'#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6',", "[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable: owl", "'#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E',", "fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0)", "missing data = 0 distance months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1]", "'#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', 
'#FFEFD5',", "= 0 tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable: owl = feature[0]", "\"03\": curOwl[2] = i[3] if month == \"04\": curOwl[3] = i[3] if month", "'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec': [] }", "'#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE',", "= dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months =", "{'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [], 'May': [], 'Jun': [], 'Jul':", "= i[3] if month == \"09\": curOwl[8] = i[3] if month == \"10\":", "= dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\":", "= i[3] if month == \"02\": curOwl[1] = i[3] if month == \"03\":", "month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable, months) X", "\"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable, months) X = np.arange(12)", "# get all Data for one owl # fill all month with distance", "if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] =", "= dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\":", "tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0]) for i in t: month", "= i[3] if month == \"08\": curOwl[7] = i[3] if month == \"09\":", "!= tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0]) for i in t:", "months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month ==", "month == \"12\": curOwl[11] = i[3] col = cnames[counter] if lastOwl == \"none\":", "'#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8',", "months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl = monthTable[0][0]", "== \"06\": curOwl[5] = i[3] if month == \"07\": curOwl[6] = i[3] if", "t: month = i[2] if month == \"01\": curOwl[0] = i[3] if month", "== \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if", "'#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC',", "if owl != tempOwl: tempOwl = owl t = getOwl(monthTable, feature[0]) for i", "curOwl[7] = i[3] if month == \"09\": curOwl[8] = i[3] if month ==", "= cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color = col) else: plt.bar(X,", "dist = feature[3] owl = getOwl(monthTable, \"1751\") # get all Data for one", "'#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0',", "ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0)", "months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable,", "'#FAF0E6', '#FF00FF', 
'#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970',", "as plt import numpy as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4',", "'#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222',", "'#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072',", "'#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00',", "== \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if", "matplotlib.pyplot as plt import numpy as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF',", "\"10\": curOwl[9] = i[3] if month == \"11\": curOwl[10] = i[3] if month", "'#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00',", "if month == \"03\": curOwl[2] = i[3] if month == \"04\": curOwl[3] =", "i[3] if month == \"09\": curOwl[8] = i[3] if month == \"10\": curOwl[9]", "\"12\": curOwl[11] = i[3] col = cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl,", "\"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months", "= dist return months months = fillMonths(monthTable, months) X = np.arange(12) curOwl =", "== \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if", "= i[3] if month == \"11\": curOwl[10] = i[3] if month == \"12\":", "'#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3',", "'#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1',", "'#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD',", "import matplotlib.pyplot as plt import numpy as np cnames = [ '#F0F8FF', '#FAEBD7',", "in t: month = i[2] if month == \"01\": curOwl[0] = i[3] if", "'#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493',", "= [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2',", "month == \"04\": curOwl[3] = i[3] if month == \"05\": curOwl[4] = i[3]", "'#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [],", "if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] =", "== \"03\": curOwl[2] = i[3] if month == \"04\": curOwl[3] = i[3] if", "dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1]", "month == \"07\": curOwl[6] = i[3] if month == \"08\": curOwl[7] = i[3]", "month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist", "dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1]", "'#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', 
'#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE',", "feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\") # get all Data for", "i[3] if month == \"11\": curOwl[10] = i[3] if month == \"12\": curOwl[11]", "'#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [],", "months): curOwl = monthTable[0][0] for feature in monthTable: tempOwl = feature[0] month =", "'#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B',", "= [] for f in monthTable: if f[0] == ID: result.append(f) return result", "= dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\":", "curOwl, color = col, bottom = lastOwl) lastOwl = curOwl counter = counter", "if month == \"07\": curOwl[6] = i[3] if month == \"08\": curOwl[7] =", "months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month ==", "[], 'Feb': [], 'Mar': [], 'Apr': [], 'May': [], 'Jun': [], 'Jul': [],", "'#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C',", "tempOwl = owl t = getOwl(monthTable, feature[0]) for i in t: month =", "'#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F',", "'#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00',", "result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0)", "\"06\": curOwl[5] = i[3] if month == \"07\": curOwl[6] = i[3] if month", "'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec': [] } def getOwl(monthTable,", "'#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0',", "getOwl(monthTable, ID): result = [] for f in monthTable: if f[0] == ID:", "== \"10\": curOwl[9] = i[3] if month == \"11\": curOwl[10] = i[3] if", "month == \"02\": curOwl[1] = i[3] if month == \"03\": curOwl[2] = i[3]", "'#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC',", "'#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371',", "month == \"01\": curOwl[0] = i[3] if month == \"02\": curOwl[1] = i[3]", "'#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC',", "if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] =", "'#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00',", "tempOwl = feature[0] month = feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\")", "\"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month", "i[3] if month == \"08\": curOwl[7] = i[3] if month == \"09\": curOwl[8]", "= col, bottom = lastOwl) lastOwl = curOwl counter = counter + 5", 
"months = {'Jan': [], 'Feb': [], 'Mar': [], 'Apr': [], 'May': [], 'Jun':", "month == \"11\": curOwl[10] = i[3] if month == \"12\": curOwl[11] = i[3]", "'#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98',", "months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in monthTable: tempOwl =", "months[\"Nov\"][len(months[\"Nov\"])-1] = dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months", "if month == \"09\": curOwl[8] = i[3] if month == \"10\": curOwl[9] =", "curOwl[9] = i[3] if month == \"11\": curOwl[10] = i[3] if month ==", "'#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F',", "curOwl = monthTable[0][0] for feature in monthTable: tempOwl = feature[0] month = feature[2]", "'Oct': [], 'Nov': [], 'Dec': [] } def getOwl(monthTable, ID): result = []", "monthTable[0][0] for feature in monthTable: tempOwl = feature[0] month = feature[2] dist =", "'#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0',", "'#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22',", "fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl =", "month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist", "col, bottom = lastOwl) lastOwl = curOwl counter = counter + 5 plt.show()", "'#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF',", "'#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513',", "'May': [], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov':", "'#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080',", "for one owl # fill all month with distance # missing data =", "'#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC',", "distance # missing data = 0 distance months = fillNull(months) if month ==", "'#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F',", "'#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500',", "= i[3] if month == \"03\": curOwl[2] = i[3] if month == \"04\":", "= fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl", "lastOwl=\"none\" for feature in monthTable: owl = feature[0] if owl != tempOwl: tempOwl", "'#8B0000', '#E9967A', '#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0',", "[] } def getOwl(monthTable, ID): result = [] for f in monthTable: if", "month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month == \"11\": months[\"Nov\"][len(months[\"Nov\"])-1] = dist", "curOwl[1] 
= i[3] if month == \"03\": curOwl[2] = i[3] if month ==", "'Nov': [], 'Dec': [] } def getOwl(monthTable, ID): result = [] for f", "curOwl[10] = i[3] if month == \"12\": curOwl[11] = i[3] col = cnames[counter]", "month = i[2] if month == \"01\": curOwl[0] = i[3] if month ==", "'#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB',", "\"02\": curOwl[1] = i[3] if month == \"03\": curOwl[2] = i[3] if month", "'#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A',", "if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] =", "f[0] == ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0)", "'#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD',", "[ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A',", "'Apr': [], 'May': [], 'Jun': [], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct':", "i in t: month = i[2] if month == \"01\": curOwl[0] = i[3]", "'#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B',", "months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month ==", "for feature in monthTable: owl = feature[0] if owl != tempOwl: tempOwl =", "= owl t = getOwl(monthTable, feature[0]) for i in t: month = i[2]", "np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD',", "== \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable, months) X =", "'#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF',", "curOwl[8] = i[3] if month == \"10\": curOwl[9] = i[3] if month ==", "monthTable: if f[0] == ID: result.append(f) return result def fillNull(months): months[\"Jan\"].append(0) months[\"Feb\"].append(0) months[\"Mar\"].append(0)", "'#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA',", "if month == \"10\": curOwl[9] = i[3] if month == \"11\": curOwl[10] =", "= dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\":", "'#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5',", "dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1]", "= dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\":", "'#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000',", "'#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32']", "dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1]", "[], 'Oct': [], 'Nov': [], 
'Dec': [] } def getOwl(monthTable, ID): result =", "== \"04\": months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if", "months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def", "\"07\": curOwl[6] = i[3] if month == \"08\": curOwl[7] = i[3] if month", "dist if month == \"12\": months[\"Dec\"][len(months[\"Dec\"])-1] = dist return months months = fillMonths(monthTable,", "as np cnames = [ '#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000',", "'#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B',", "\"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month", "'#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb': [], 'Mar':", "months[\"Feb\"].append(0) months[\"Mar\"].append(0) months[\"Apr\"].append(0) months[\"May\"].append(0) months[\"Jun\"].append(0) months[\"Jul\"].append(0) months[\"Aug\"].append(0) months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months", "== \"none\": plt.bar(X, curOwl, color = col) else: plt.bar(X, curOwl, color = col,", "i[3] if month == \"12\": curOwl[11] = i[3] col = cnames[counter] if lastOwl", "'#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9',", "== \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if", "= fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\":", "'#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9',", "months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature", "months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\"", "color = col) else: plt.bar(X, curOwl, color = col, bottom = lastOwl) lastOwl", "== \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if", "return months months = fillMonths(monthTable, months) X = np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter", "== \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if", "feature[3] owl = getOwl(monthTable, \"1751\") # get all Data for one owl #", "data = 0 distance months = fillNull(months) if month == \"01\": months[\"Jan\"][len(months[\"Jan\"])-1] =", "\"1751\") # get all Data for one owl # fill all month with", "'#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [],", "= feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\") # get all Data", "dist if month == \"06\": months[\"Jun\"][len(months[\"Jun\"])-1] = dist if month == \"07\": 
months[\"Jul\"][len(months[\"Jul\"])-1]", "'#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082',", "= dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month == \"06\":", "== \"07\": curOwl[6] = i[3] if month == \"08\": curOwl[7] = i[3] if", "months[\"Apr\"][len(months[\"Apr\"])-1] = dist if month == \"05\": months[\"May\"][len(months[\"May\"])-1] = dist if month ==", "'#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000',", "'#00FFFF', '#00008B', '#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A',", "ID): result = [] for f in monthTable: if f[0] == ID: result.append(f)", "if month == \"01\": curOwl[0] = i[3] if month == \"02\": curOwl[1] =", "months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\": months[\"Oct\"][len(months[\"Oct\"])-1] = dist if month ==", "'#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6',", "'#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700',", "if month == \"07\": months[\"Jul\"][len(months[\"Jul\"])-1] = dist if month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] =", "'#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080',", "= feature[0] month = feature[2] dist = feature[3] owl = getOwl(monthTable, \"1751\") #", "if month == \"08\": curOwl[7] = i[3] if month == \"09\": curOwl[8] =", "\"05\": curOwl[4] = i[3] if month == \"06\": curOwl[5] = i[3] if month", "= getOwl(monthTable, feature[0]) for i in t: month = i[2] if month ==", "if lastOwl == \"none\": plt.bar(X, curOwl, color = col) else: plt.bar(X, curOwl, color", "'#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460',", "months[\"Jan\"][len(months[\"Jan\"])-1] = dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month ==", "\"0\" lastOwl=\"none\" for feature in monthTable: owl = feature[0] if owl != tempOwl:", "month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist", "np.arange(12) curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\" for feature", "'#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F',", "\"09\": curOwl[8] = i[3] if month == \"10\": curOwl[9] = i[3] if month", "'#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080',", "tempOwl = \"0\" lastOwl=\"none\" for feature in monthTable: owl = feature[0] if owl", "'#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB',", "month == \"08\": months[\"Aug\"][len(months[\"Aug\"])-1] = dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist", "months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for feature in monthTable:", 
"'#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32'] months = {'Jan': [], 'Feb':", "'Dec': [] } def getOwl(monthTable, ID): result = [] for f in monthTable:", "curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,] counter = 0 tempOwl = \"0\" lastOwl=\"none\" for feature in", "owl = feature[0] if owl != tempOwl: tempOwl = owl t = getOwl(monthTable,", "'#87CEFA', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB',", "'#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C',", "= i[3] if month == \"05\": curOwl[4] = i[3] if month == \"06\":", "== \"09\": curOwl[8] = i[3] if month == \"10\": curOwl[9] = i[3] if", "plt.bar(X, curOwl, color = col, bottom = lastOwl) lastOwl = curOwl counter =", "[], 'Jul': [], 'Aug': [], 'Sep': [], 'Oct': [], 'Nov': [], 'Dec': []", "= monthTable[0][0] for feature in monthTable: tempOwl = feature[0] month = feature[2] dist", "= dist if month == \"09\": months[\"Sep\"][len(months[\"Sep\"])-1] = dist if month == \"10\":", "in monthTable: owl = feature[0] if owl != tempOwl: tempOwl = owl t", "dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1] = dist if month == \"04\": months[\"Apr\"][len(months[\"Apr\"])-1]", "col = cnames[counter] if lastOwl == \"none\": plt.bar(X, curOwl, color = col) else:", "color = col, bottom = lastOwl) lastOwl = curOwl counter = counter +", "'#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA',", "month == \"09\": curOwl[8] = i[3] if month == \"10\": curOwl[9] = i[3]", "'#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF', '#8A2BE2', '#A52A2A', '#DEB887', '#5F9EA0', '#7FFF00',", "'#6A5ACD', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF',", "months[\"Sep\"].append(0) months[\"Oct\"].append(0) months[\"Nov\"].append(0) months[\"Dec\"].append(0) return months def fillMonths(monthTable, months): curOwl = monthTable[0][0] for", "= \"0\" lastOwl=\"none\" for feature in monthTable: owl = feature[0] if owl !=", "'#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6',", "dist if month == \"02\": months[\"Feb\"][len(months[\"Feb\"])-1] = dist if month == \"03\": months[\"Mar\"][len(months[\"Mar\"])-1]", "\"04\": curOwl[3] = i[3] if month == \"05\": curOwl[4] = i[3] if month", "'#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1'," ]
[ "LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\",", "'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand'", ": \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' :", "scrapy.linkextractors import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category'", ": \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' :", "\"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\",", "'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand' : \"\" }", "'canonical' : \"\", 'base_url' : \"\", 'brand' : \"\" } name = '<EMAIL>'", "you make modification. from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH =", ": \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand' :", ": \"\", 'base_url' : \"\", 'brand' : \"\" } name = '<EMAIL>' allowed_domains", "tracking_url = '' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow = ['']", "'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'), Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\\d+\\.html$']), 'parse'), #Rule(LinkExtractor(), 'parse_item_and_links'),", "= ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules =", "\"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand' : \"\"", "Delete this line if you make modification. from scrapy.spiders import Rule from scrapy.linkextractors", "['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules = [('',", "generator.py. Delete this line if you make modification. from scrapy.spiders import Rule from", "<reponame>chongiadung/choinho # Auto generated by generator.py. Delete this line if you make modification.", "'' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules =", "= { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' :", "'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical'", "by generator.py. Delete this line if you make modification. 
from scrapy.spiders import Rule", "sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'), Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\\d+\\.html$']),", "sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'), Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\\d+\\.html$']), 'parse'), #Rule(LinkExtractor(), 'parse_item_and_links'), ]", ": \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand' : \"\" } name", "from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH = { 'name' :", "= ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow", "= '' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules", "\"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url =", "= '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls =", ": \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' :", "['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow =", "start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules = [('', 'parse_item')]", ": \"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url", "this line if you make modification. from scrapy.spiders import Rule from scrapy.linkextractors import", "Rule from scrapy.linkextractors import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' :", "= [('', 'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'), Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\\d+\\.html$']), 'parse'),", "generated by generator.py. Delete this line if you make modification. 
from scrapy.spiders import", "from scrapy.linkextractors import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\",", "import Rule from scrapy.linkextractors import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price'", "\"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url' : \"\", 'brand' : \"\" } name =", "{ 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\",", "} name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = ''", "sitemap_urls = [''] sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules = [", "scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\",", "line if you make modification. from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor", "import LinkExtractor XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' :", "'base_url' : \"\", 'brand' : \"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn']", "XPATH = { 'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description'", "allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = [''] sitemap_rules", "[''] sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'),", "make modification. from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH = {", ": \"\", 'brand' : \"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls", "= [''] sitemap_rules = [('', 'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']),", "'name' : \"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images'", "\"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\",", "'<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls = ['']", "name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/'] tracking_url = '' sitemap_urls", "\"//h1[@class='nameprod']\", 'price' : \"//div[@class='c2']/div[@class='imp'][1]/span[@class='price']\", 'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\",", "if you make modification. 
from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH", "[('', 'parse_item')] sitemap_follow = [''] rules = [ Rule(LinkExtractor(allow=['/yen-huyet/','/hong-yen/','/bach-yen/','/yen-gay-to/','/chan-to-yen/','/yen-tuoi/','/che-to-yen-sup-yen/','/combo-yen-sao/']), 'parse_item'), Rule(LinkExtractor(allow=['/yen-huyet.html','/hong-yen.html','/bach-yen.html','/yen-gay-to.html','/chan-to-yen.html','/yen-tuoi.html','/che-to-yen-sup-yen.html','/combo-yen-sao.html','page=\\d+\\.html$']), 'parse'), #Rule(LinkExtractor(),", "Auto generated by generator.py. Delete this line if you make modification. from scrapy.spiders", "'category' : \"//div[@class='nav_center']/span/a/span\", 'description' : \"//div[@class='intro']\", 'images' : \"//div[@class='ui-corner-all']/div/div/a/@href\", 'canonical' : \"\", 'base_url'", "\"\", 'brand' : \"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls =", "\"\", 'base_url' : \"\", 'brand' : \"\" } name = '<EMAIL>' allowed_domains =", "# Auto generated by generator.py. Delete this line if you make modification. from", "'brand' : \"\" } name = '<EMAIL>' allowed_domains = ['yensaophuyen.vn'] start_urls = ['http://yensaophuyen.vn/']", "modification. from scrapy.spiders import Rule from scrapy.linkextractors import LinkExtractor XPATH = { 'name'" ]
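Tables like XPATH and the rules list above are normally consumed by a Scrapy CrawlSpider subclass. A minimal sketch under that assumption (the class name ProductSpider, the spider name, and the callback body are illustrative, not part of the generated file):

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class ProductSpider(CrawlSpider):
    # Illustrative spider showing where the generated name/start_urls/rules plug in.
    name = 'yensaophuyen'  # hypothetical spider name; the generated module elides the real one
    allowed_domains = ['yensaophuyen.vn']
    start_urls = ['http://yensaophuyen.vn/']
    rules = [Rule(LinkExtractor(allow=['/yen-tuoi/']), callback='parse_item')]

    def parse_item(self, response):
        # Extract one field with the XPATH table's 'name' expression.
        yield {'name': response.xpath("//h1[@class='nameprod']/text()").get()}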
[ "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "limitations under the License. # ============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\"", "quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg", "tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB", "int64s. Args: values: A scalar or list of values. Returns: a TF-Feature. \"\"\"", "pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return", "\"\"\" if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value):", "open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a", "string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth,", "png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data})", "elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox':", "this file except in compliance with the License. # You may obtain a", "image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height':", "bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def", "def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label':", "= tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB JPEG data. self._encode_image_data =", "open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir):", "int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow", "ANY KIND, either express or implied. # See the License for the specific", "run all image coding calls. 
self._sess = tf.Session() # Initializes function that converts", "class that provides TensorFlow image coding utilities.\"\"\" def __init__(self): # Create a single", "that provides TensorFlow image coding utilities.\"\"\" def __init__(self): # Create a single Session", "floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id): return", "TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "__init__(self): # Create a single Session to run all image coding calls. self._sess", "Args: values: A string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3", "TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def", "class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width),", "int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format,", "image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id),", "OF ANY KIND, either express or implied. # See the License for the", "name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def", "__future__ import absolute_import from __future__ import division from __future__ import print_function import numpy", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under", "def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image = self._sess.run(self._encode_jpeg, feed_dict={self._encode_image_data: image_data}) return image", "decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2]", "Returns: a TF-Feature. 
\"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation,", "'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides", "assert len(image.shape) == 3 assert image.shape[2] == 3 return image def encode_jpeg(self, image_data):", "3 return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image = self._sess.run(self._encode_jpeg, feed_dict={self._encode_image_data:", "list of values. Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values", "the License. # ============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\" from __future__", "image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image =", "'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object):", "self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image", "load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return pickle.load(f) def", "tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data),", "(tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values):", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "\"\"\"Returns a TF-Feature of bytes. Args: values: A string. Returns: a TF-Feature. \"\"\"", "# Initializes function that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "3 assert image.shape[2] == 3 return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8)", "of bytes. Args: values: A string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def", "image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image = self._sess.run(self._encode_jpeg, feed_dict={self._encode_image_data: image_data}) return", "single Session to run all image coding calls. self._sess = tf.Session() # Initializes", "int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "image coding utilities.\"\"\" def __init__(self): # Create a single Session to run all", "# Create a single Session to run all image coding calls. self._sess =", "governing permissions and # limitations under the License. 
# ============================================================================== \"\"\"Contains utilities for", "class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\" def __init__(self): #", "def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj,", "values. Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values = [values]", "pickle import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb')", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data})", "def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "np import tensorflow as tf import pickle import os def save_obj(obj, save_dir, name):", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "numpy as np import tensorflow as tf import pickle import os def save_obj(obj,", "tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "calls. self._sess = tf.Session() # Initializes function that converts PNG to JPEG data.", "bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that", "specific language governing permissions and # limitations under the License. # ============================================================================== \"\"\"Contains", "RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return", "to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format),", "import print_function import numpy as np import tensorflow as tf import pickle import", "a TF-Feature of int64s. Args: values: A scalar or list of values. Returns:", "or agreed to in writing, software # distributed under the License is distributed", "all image coding calls. self._sess = tf.Session() # Initializes function that converts PNG", "Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values = [values] return", "that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3)", "image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "'.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s.", "RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function", "TF-Feature of bytes. Args: values: A string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))", "image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3", "converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name", "as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'),", "License. # You may obtain a copy of the License at # #", "# Initializes function that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data", "scalar or list of values. Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple,", "+ '.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of", "def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return pickle.load(f)", "tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data =", "height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height),", "compliance with the License. # You may obtain a copy of the License", "self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG data.", "tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK", "self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB JPEG data. self._encode_image_data", "pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values: A scalar or", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values: A scalar or list", "language governing permissions and # limitations under the License. 
# ============================================================================== \"\"\"Contains utilities", "'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id):", "floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded':", "Initializes function that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image =", "Args: values: A scalar or list of values. Returns: a TF-Feature. \"\"\" if", "print_function import numpy as np import tensorflow as tf import pickle import os", "'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name +", "bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values: A string. Returns: a TF-Feature.", "Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the", "A scalar or list of values. Returns: a TF-Feature. \"\"\" if not isinstance(values,", "bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width':", "})) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\" def __init__(self):", "downloading and converting datasets.\"\"\" from __future__ import absolute_import from __future__ import division from", "return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values: A scalar", "provides TensorFlow image coding utilities.\"\"\" def __init__(self): # Create a single Session to", "not use this file except in compliance with the License. # You may", "return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint':", "== 3 assert image.shape[2] == 3 return image def encode_jpeg(self, image_data): image_data =", "License, Version 2.0 (the \"License\"); # you may not use this file except", "to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb =", "channels=3) # Initializes function that encodes RGB JPEG data. 
self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg", "azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]),", "'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), }))", "'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "and converting datasets.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data):", "tensorflow as tf import pickle import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir,", "Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data,", "# Initializes function that encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg =", "# you may not use this file except in compliance with the License.", "as tf import pickle import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name", "coding utilities.\"\"\" def __init__(self): # Create a single Session to run all image", "JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that", "agreed to in writing, software # distributed under the License is distributed on", "def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values: A string. Returns: a", "feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image", "(the \"License\"); # you may not use this file except in compliance with", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image = self._sess.run(self._encode_jpeg, feed_dict={self._encode_image_data: image_data})", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes", "permissions and # limitations under the License. 
# ============================================================================== \"\"\"Contains utilities for downloading", "# ============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\" from __future__ import absolute_import", "def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format':", "image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that", "tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function", "file except in compliance with the License. # You may obtain a copy", "data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb", "values: A scalar or list of values. Returns: a TF-Feature. \"\"\" if not", "a TF-Feature of bytes. Args: values: A string. Returns: a TF-Feature. \"\"\" return", "import numpy as np import tensorflow as tf import pickle import os def", "ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\" def __init__(self): # Create", "import absolute_import from __future__ import division from __future__ import print_function import numpy as", "License for the specific language governing permissions and # limitations under the License.", "return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), }))", "to in writing, software # distributed under the License is distributed on an", "that encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self,", "elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data),", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\" def", "function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)", "decodes RGB JPEG data. 
self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes", "self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG data", "return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self,", "if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return", "= tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def", "data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)", "int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding", "or implied. # See the License for the specific language governing permissions and", "datasets.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "__future__ import print_function import numpy as np import tensorflow as tf import pickle", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "TF-Feature of int64s. Args: values: A scalar or list of values. Returns: a", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def", "Create a single Session to run all image coding calls. self._sess = tf.Session()", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values: A", "A string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox,", "or list of values. Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)):", "from __future__ import print_function import numpy as np import tensorflow as tf import", "'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args:", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "tf import pickle import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name +", "Initializes function that encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data)", "data. 
self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes", "save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "utilities.\"\"\" def __init__(self): # Create a single Session to run all image coding", "image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB", "channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG", "use this file except in compliance with the License. # You may obtain", "def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data:", "int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values: A scalar or list of", "list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns", "== 3 return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image = self._sess.run(self._encode_jpeg,", "Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height,", "self._sess = tf.Session() # Initializes function that converts PNG to JPEG data. self._png_data", "= tf.Session() # Initializes function that converts PNG to JPEG data. self._png_data =", "'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"", "2.0 (the \"License\"); # you may not use this file except in compliance", "\"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={", "for the specific language governing permissions and # limitations under the License. #", "to run all image coding calls. self._sess = tf.Session() # Initializes function that", "feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) ==", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "coding calls. self._sess = tf.Session() # Initializes function that converts PNG to JPEG", "TensorFlow Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version", "__future__ import division from __future__ import print_function import numpy as np import tensorflow", "# # Unless required by applicable law or agreed to in writing, software", "JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb',", "express or implied. # See the License for the specific language governing permissions", "as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature of int64s. Args: values:", "either express or implied. # See the License for the specific language governing", "that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) #", "tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values: A string. Returns:", "def __init__(self): # Create a single Session to run all image coding calls.", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\"\"\"Returns a TF-Feature of int64s. Args: values: A scalar or list of values.", "'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper class", "division from __future__ import print_function import numpy as np import tensorflow as tf", "of values. Returns: a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values =", "return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values: A string.", "function that encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def", "image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image def encode_jpeg(self,", "channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG", "the License. # You may obtain a copy of the License at #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data:", "image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "for downloading and converting datasets.\"\"\" from __future__ import absolute_import from __future__ import division", "JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb',", "+ '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir,", "tf.Session() # Initializes function that converts PNG to JPEG data. 
self._png_data = tf.placeholder(dtype=tf.string)", "return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded':", "self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) #", "tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb,", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "a TF-Feature. \"\"\" if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values))", "'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation,", "import division from __future__ import print_function import numpy as np import tensorflow as", "# limitations under the License. # ============================================================================== \"\"\"Contains utilities for downloading and converting", "with the License. # You may obtain a copy of the License at", "# Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg =", "theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox),", "CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data,", "utilities for downloading and converting datasets.\"\"\" from __future__ import absolute_import from __future__ import", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "= tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes", "encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data):", "= tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data", "JPEG data. 
self._encode_image_data = tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg,", "self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape)", "values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a", "= tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts", "\"\"\"Contains utilities for downloading and converting datasets.\"\"\" from __future__ import absolute_import from __future__", "tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB JPEG data. self._encode_image_data = tf.placeholder(dtype=tf.uint8)", "law or agreed to in writing, software # distributed under the License is", "a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta):", "the License for the specific language governing permissions and # limitations under the", "image coding calls. self._sess = tf.Session() # Initializes function that converts PNG to", "Initializes function that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "quality=100) # Initializes function that converts CMYK JPEG data to RGB JPEG data.", "cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data:", "f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f:", "JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0)", "def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg,", "License. # ============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\" from __future__ import", "values: A string. Returns: a TF-Feature. \"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data, image_format, im_size,", "absolute_import from __future__ import division from __future__ import print_function import numpy as np", "under the License. # ============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\" from", "PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg =", "a single Session to run all image coding calls. self._sess = tf.Session() #", "Reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "import pickle import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'),", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "})) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format),", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj, f,", "import tensorflow as tf import pickle import os def save_obj(obj, save_dir, name): with", "name + '.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns a TF-Feature", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format':", "format='rgb', quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string)", "= tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB JPEG", "The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License,", "tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) # Initializes function that encodes RGB JPEG data.", "data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)", "def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values:", "Session to run all image coding calls. self._sess = tf.Session() # Initializes function", "import os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes", "bytes. Args: values: A string. Returns: a TF-Feature. 
\"\"\" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def to_tfexample(image_data,", "name + '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with", "bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class ImageCoder(object): \"\"\"Helper", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image def", "file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]),", "to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image,", "[values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of", "that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image", "converting datasets.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args: values: A", "self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) #", "return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes.", "return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert", "from __future__ import division from __future__ import print_function import numpy as np import", "with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f: return pickle.load(f) def int64_feature(values): \"\"\"Returns", "'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "assert image.shape[2] == 3 return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image", "the specific language governing permissions and # limitations under the License. 
# ==============================================================================", "image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] ==", "2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache", "os def save_obj(obj, save_dir, name): with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f:", "im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]),", "as np import tensorflow as tf import pickle import os def save_obj(obj, save_dir,", "TensorFlow image coding utilities.\"\"\" def __init__(self): # Create a single Session to run", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG data to RGB", "format='rgb', quality=100) # Initializes function that converts CMYK JPEG data to RGB JPEG", "'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth, elevation, theta]), })) def image_to_tfexample(image_data,", "= [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature", "= tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG data to", "with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name,", "tf.placeholder(dtype=tf.uint8) self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self,", "of int64s. Args: values: A scalar or list of values. Returns: a TF-Feature.", "tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) class", "============================================================================== \"\"\"Contains utilities for downloading and converting datasets.\"\"\" from __future__ import absolute_import from", "tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def bytes_feature(values): \"\"\"Returns a TF-Feature of bytes. Args:", "converts CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image =", "width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width':", "RGB JPEG data. 
self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image,", "not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def floats_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data):", "image_format, im_size, bbox, azimuth, elevation, theta): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height':", "function that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string)", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb') as", "= self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return", "image.shape[2] == 3 return image def encode_jpeg(self, image_data): image_data = image_data.astype(dtype=np.uint8) image =", "function that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data,", "\"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\" def __init__(self): # Create a", "tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/height': int64_feature(im_size[0]), 'image/width': int64_feature(im_size[1]), 'image/bbox': floats_feature(bbox), 'image/viewpoint': floats_feature([azimuth,", "and # limitations under the License. # ============================================================================== \"\"\"Contains utilities for downloading and", "f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name, file_dir): with open(os.path.join(file_dir, name + '.pkl'), 'rb')", "len(image.shape) == 3 assert image.shape[2] == 3 return image def encode_jpeg(self, image_data): image_data", "= tf.image.encode_jpeg(self._encode_image_data) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return" ]
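# ------------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): a minimal example of
# how the helpers above could fit together to serialize a single image into a
# TFRecord. The output path 'example.tfrecord', the synthetic image, and the
# dummy bbox/viewpoint values are assumptions made purely for illustration; the
# TF 1.x graph-mode API (tf.Session, tf.python_io) is what the code above
# already relies on.
if __name__ == '__main__':
  coder = ImageCoder()
  dummy = np.zeros((64, 64, 3), dtype=np.uint8)   # synthetic black RGB image
  jpeg_bytes = coder.encode_jpeg(dummy)           # raw JPEG bytes via the Session
  example = to_tfexample(jpeg_bytes, b'jpg', (64, 64),
                         [0.0, 0.0, 1.0, 1.0],    # hypothetical bbox
                         0.0, 0.0, 0.0)           # hypothetical azimuth/elevation/theta
  with tf.python_io.TFRecordWriter('example.tfrecord') as writer:
    writer.write(example.SerializeToString())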
#!/usr/bin/env python3
from decimal import *
from binascii import unhexlify

# need some precision to evaluate PI
getcontext().prec = 2000

# from the python docs
def pi():
    """Compute Pi to the current precision.

    >>> print(pi())
    3.141592653589793238462643383

    """
    getcontext().prec += 2  # extra digits for intermediate steps
    three = Decimal(3)      # substitute "three=3.0" for regular floats
    lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
    while s != lasts:
        lasts = s
        n, na = n+na, na+8
        d, da = d+da, da+32
        t = (t * n) / d
        s += t
    getcontext().prec -= 2
    return +s               # unary plus applies the new precision

### The DH values defined in the RFC

#2. 1536-bit MODP Group
#
# The 1536 bit MODP group has been used for the implementations for
# quite a long time, but was not defined in RFC 2409 (IKE).
# Implementations have been using group 5 to designate this group, we
# standardize that practice here.
#
# The prime is: 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 }

f = 2**1536 - 2**1472 - 1 + 2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804)

# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.

#3. 2048-bit MODP Group
#
# This group is assigned id 14.
#
# This prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }

f = 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476)

#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF
FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.

#4. 3072-bit MODP Group
#
# This group is assigned id 15.
#
# This prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }

f = 2**3072 - 2**3008 - 1 + 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314)

#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.

#5. 4096-bit MODP Group
#
# This group is assigned id 16.
#
# This prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }

f = 2**4096 - 2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904)

#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199
FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.

#6. 6144-bit MODP Group
#
# This group is assigned id 17.
#
# This prime is: 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 }

f = 2**6144 - 2**6080 - 1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484)

#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406
AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918
DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151
2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03
F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F
BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B
B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632
387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E
6DCC4024 FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
assert Decimal(p) - f == 0
#
# The generator is: 2.

#7. 8192-bit MODP Group
# (needs higher precision)
getcontext().prec = 4000
#
# This group is assigned id 18.
#
# This prime is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 }

f = 2**8192 - 2**8128 - 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158)

#
# Its hexadecimal value is:
#
p = """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406
AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918
DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151
2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03
F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F
BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B
B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632
387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E
6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA
3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C
5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9
22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886
2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6
6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5
0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268
359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6
FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71
60C980DD 98EDD3DF FFFFFFFF FFFFFFFF"""
p = int.from_bytes(unhexlify(p.replace('\n', '').replace(' ', '')), 'big')
print(Decimal(p) - f)
assert Decimal(p) - f == 0
#
# The generator is: 2.
8192-bit MODP Group # (needs higher", "5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47", "# extra digits for intermediate steps three = Decimal(3) # substitute \"three=3.0\" for", "6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36", "58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864", "3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268", "3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0", "DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B", "* pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF", "74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')),", "docs def pi(): \"\"\"Compute Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383 \"\"\"", "0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B", "2. #3. 2048-bit MODP Group # # This group is assigned id 14.", "B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2", "p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') print(Decimal(p) - f) assert Decimal(p) -", "6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3", "# substitute \"three=3.0\" for regular floats lasts, t, s, n, na, d, da", "- 1 + 2^64 * { [2^3966 pi] + 240904 } f =", "80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245", "} f = 2**3072 - 2**3008 - 1 + 2**64 * ((Decimal(2**2942) *", "2576F693 6BA42466 3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8", "C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E", "238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F", "60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') print(Decimal(p) -", "is assigned id 15. # # This prime is: 2^3072 - 2^3008 -", "1 + 2^64 * { [2^6014 pi] + 929484 } f = 2**6144", "group is assigned id 17. # # This prime is: 2^6144 - 2^6080", "B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447", "A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226", "this group, we # standardize that practice here. 
# # The prime is:", "08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834", "2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 } f", "2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF", "625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05", "extra digits for intermediate steps three = Decimal(3) # substitute \"three=3.0\" for regular", "prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi]", "17. # # This prime is: 2^6144 - 2^6080 - 1 + 2^64", "2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) # # Its hexadecimal value is:", "f) assert Decimal(p) - f == 0 # # The generator is: 2.", "= (t * n) / d s += t getcontext().prec -= 2 return", "/ d s += t getcontext().prec -= 2 return +s # unary plus", "124476 } f = 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918)", "# p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879", "# from the python docs def pi(): \"\"\"Compute Pi to the current precision.", "3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B", "D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\"", "23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B", "bit MODP group has been used for the implementations for # quite a", "FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\"", "43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ',", "+ 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # # Its hexadecimal value", "2048-bit MODP Group # # This group is assigned id 14. # #", "741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04", "+ 2^64 * { [2^1918 pi] + 124476 } f = 2**2048 -", "2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 } f", "assert Decimal(p) - f == 0 # # The generator is: 2. 
#6.", "* { [2^8062 pi] + 4743158 } f = 2**8192 - 2**8128 -", "* ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904) # # Its hexadecimal value is: #", "-= 2 return +s # unary plus applies the new precision ### The", "- 1 + 2^64 * { [2^8062 pi] + 4743158 } f =", "3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B", "1690314) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234", "unary plus applies the new precision ### The DH values defined in the", "EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\"", "hexadecimal value is: # p = \"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08", "387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n',", "# This prime is: 2^4096 - 2^4032 - 1 + 2^64 * {", "- 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }", "= 2**4096 - 2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) +", "getcontext().prec += 2 # extra digits for intermediate steps three = Decimal(3) #", "99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC", "group is assigned id 15. # # This prime is: 2^3072 - 2^3008", "# This group is assigned id 14. # # This prime is: 2^2048", "# # This prime is: 2^3072 - 2^3008 - 1 + 2^64 *", "DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F", "D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76", "2**3008 - 1 + 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # #", "413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42", "6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576", "CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0", "D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "+ 741804) # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234", "0 # # The generator is: 2. #3. 2048-bit MODP Group # #", "for regular floats lasts, t, s, n, na, d, da = 0, three,", "15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "18. 
# # This prime is: 2^8192 - 2^8128 - 1 + 2^64", "A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6", "} f = 2**1536 - 2**1472 - 1 + 2**64 * ((Decimal(2**1406) *", "74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C", "2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476) # # Its hexadecimal value is:", "} f = 2**6144 - 2**6080 - 1 + 2**64 * ((Decimal(2**6014) *", "EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')),", "is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] +", "EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F", "[2^2942 pi] + 1690314 } f = 2**3072 - 2**3008 - 1 +", "98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') print(Decimal(p) - f)", "((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # # Its hexadecimal value is: # p", "2**4096 - 2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904)", "values defined in the RFC #2. 1536-bit MODP Group # # The 1536", "9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D", "A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\"", "practice here. # # The prime is: 2^1536 - 2^1472 - 1 +", "AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0", "= n+na, na+8 d, da = d+da, da+32 t = (t * n)", "- 1 + 2^64 * { [2^1406 pi] + 741804 } f =", "C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D", "B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED", "0, 0, 24 while s != lasts: lasts = s n, na =", "04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C", "2**6080 - 1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) # #", "B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401", "applies the new precision ### The DH values defined in the RFC #2.", "n, na = n+na, na+8 d, da = d+da, da+32 t = (t", "is: 2. #6. 6144-bit MODP Group # # This group is assigned id", "515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199", "5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E", "s += t getcontext().prec -= 2 return +s # unary plus applies the", "FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B", "* { [2^2942 pi] + 1690314 } f = 2**3072 - 2**3008 -", "s n, na = n+na, na+8 d, da = d+da, da+32 t =", "- 2**6080 - 1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) #", "id 14. # # This prime is: 2^2048 - 2^1984 - 1 +", "f == 0 # # The generator is: 2. #7. 
8192-bit MODP Group", "38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568 3423B474", "0, 24 while s != lasts: lasts = s n, na = n+na,", "MODP Group # # This group is assigned id 14. # # This", "* ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) # # Its hexadecimal value is: #", "is: 2. #7. 8192-bit MODP Group # (needs higher precision) getcontext().prec = 4000", "1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904) # # Its hexadecimal", "F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C", "# # The generator is: 2. #3. 2048-bit MODP Group # # This", "770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718", "# The generator is: 2. #4. 3072-bit MODP Group # # This group", "95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92", "387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0", "A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C", "t = (t * n) / d s += t getcontext().prec -= 2", "16. # # This prime is: 2^4096 - 2^4032 - 1 + 2^64", "a long time, but was not defined in RFC 2409 (IKE). # Implementations", "= 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) +", "3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300", "15. # # This prime is: 2^3072 - 2^3008 - 1 + 2^64", "- 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904) # # Its", "6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) -", "F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836", "14. 
# # This prime is: 2^2048 - 2^1984 - 1 + 2^64", "FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B", "34063199 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) -", "EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23", "741804 } f = 2**1536 - 2**1472 - 1 + 2**64 * ((Decimal(2**1406)", "A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66", "C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE", "900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0", "pi()).to_integral_exact(ROUND_FLOOR) + 240904) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF", "9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n',", "n, na, d, da = 0, three, 3, 1, 0, 0, 24 while", "287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98", "CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) -", "69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF", "670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6", "assigned id 17. # # This prime is: 2^6144 - 2^6080 - 1", "DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15", "F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E", "long time, but was not defined in RFC 2409 (IKE). # Implementations have", "#6. 6144-bit MODP Group # # This group is assigned id 17. 
#", "Its hexadecimal value is: # p = \"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1", "- 1 + 2^64 * { [2^1918 pi] + 124476 } f =", "2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510", "is: 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] +", "- 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }", "86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "pi] + 929484 } f = 2**6144 - 2**6080 - 1 + 2**64", "8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) -", "2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437", "2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal value is: #", "ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D", "- 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158) # # Its", "59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE", "2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE", "- 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 }", "This prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942", "is: 2. #4. 3072-bit MODP Group # # This group is assigned id", "4743158) # # Its hexadecimal value is: # p = \"\"\"FFFFFFFF FFFFFFFF C90FDAA2", "intermediate steps three = Decimal(3) # substitute \"three=3.0\" for regular floats lasts, t,", "88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947", "# need some precision to evaluate PI getcontext().prec = 2000 # from the", "1ECFA268 359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF", "E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8", "for # quite a long time, but was not defined in RFC 2409", "The generator is: 2. #4. 3072-bit MODP Group # # This group is", "043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ',", "BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10", "higher precision) getcontext().prec = 4000 # # This group is assigned id 18.", "\"\"\"Compute Pi to the current precision. 
>>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec += 2", "1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) # # Its hexadecimal", "t, s, n, na, d, da = 0, three, 3, 1, 0, 0,", "1 + 2^64 * { [2^1918 pi] + 124476 } f = 2**2048", "* n) / d s += t getcontext().prec -= 2 return +s #", "08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ',", "'')), 'big') assert Decimal(p) - f == 0 # # The generator is:", "C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D", "= 2000 # from the python docs def pi(): \"\"\"Compute Pi to the", "4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718", "2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p =", "DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4", "521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12", "4BD407B2 2B4154AA CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1", "D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5", "{ [2^3966 pi] + 240904 } f = 2**4096 - 2**4032 - 1", "substitute \"three=3.0\" for regular floats lasts, t, s, n, na, d, da =", "A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F", "from binascii import unhexlify # need some precision to evaluate PI getcontext().prec =", "is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] +", "have been using group 5 to designate this group, we # standardize that", "value is: # p = \"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74", "+ 124476) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2", "302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411", "A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) -", "04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D", "15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85", "2**8192 - 2**8128 - 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158)", "prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi]", "da = d+da, da+32 t = (t * n) / d s +=", "8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273", "2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD", "* { [2^1406 pi] + 741804 } f = 2**1536 - 2**1472 -", "\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3", "binascii import unhexlify # need some precision to evaluate PI getcontext().prec = 2000", "not defined in RFC 2409 (IKE). 
# Implementations have been using group 5", "929484 } f = 2**6144 - 2**6080 - 1 + 2**64 * ((Decimal(2**6014)", "770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n',", "2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476) # #", "python docs def pi(): \"\"\"Compute Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383", "== 0 # # The generator is: 2. #3. 2048-bit MODP Group #", "the RFC #2. 1536-bit MODP Group # # The 1536 bit MODP group", "AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356", "+= t getcontext().prec -= 2 return +s # unary plus applies the new", "f == 0 # # The generator is: 2. #4. 3072-bit MODP Group", "', '')), 'big') assert Decimal(p) - f == 0 # # The generator", "1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327", "quite a long time, but was not defined in RFC 2409 (IKE). #", "4096-bit MODP Group # # This group is assigned id 16. # #", "- 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }", "# # This prime is: 2^2048 - 2^1984 - 1 + 2^64 *", "- f == 0 # # The generator is: 2. #6. 6144-bit MODP", "2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # # Its hexadecimal value is:", "4000 # # This group is assigned id 18. # # This prime", "5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p =", "2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 } f", "73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D", "floats lasts, t, s, n, na, d, da = 0, three, 3, 1,", "9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0", "1690314 } f = 2**3072 - 2**3008 - 1 + 2**64 * ((Decimal(2**2942)", "pi] + 4743158 } f = 2**8192 - 2**8128 - 1 + 2**64", "id 15. # # This prime is: 2^3072 - 2^3008 - 1 +", "2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 } f", "1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186", "import * from binascii import unhexlify # need some precision to evaluate PI", "RFC #2. 1536-bit MODP Group # # The 1536 bit MODP group has", "1536-bit MODP Group # # The 1536 bit MODP group has been used", "9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF", ">>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec += 2 # extra digits for intermediate steps", "- 2**8128 - 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158) #", "0 # # The generator is: 2. #4. 
3072-bit MODP Group # #", "A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4", "[2^3966 pi] + 240904 } f = 2**4096 - 2**4032 - 1 +", "CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC", "to evaluate PI getcontext().prec = 2000 # from the python docs def pi():", "for intermediate steps three = Decimal(3) # substitute \"three=3.0\" for regular floats lasts,", "A2C087E8 79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D", "C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p", "print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec += 2 # extra digits for intermediate steps three", "using group 5 to designate this group, we # standardize that practice here.", "that practice here. # # The prime is: 2^1536 - 2^1472 - 1", "== 0 # # The generator is: 2. #7. 8192-bit MODP Group #", "80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') print(Decimal(p)", "the new precision ### The DH values defined in the RFC #2. 1536-bit", "CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5", "43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA", "} f = 2**8192 - 2**8128 - 1 + 2**64 * ((Decimal(2**8062) *", "CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF\"\"\"", "{ [2^1918 pi] + 124476 } f = 2**2048 - 2**1984 - 1", "f = 2**4096 - 2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR)", "2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484", "DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6", "2^64 * { [2^6014 pi] + 929484 } f = 2**6144 - 2**6080", "assert Decimal(p) - f == 0 # # The generator is: 2. #3.", "pi(): \"\"\"Compute Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec +=", "Group # # This group is assigned id 14. # # This prime", "177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7", "A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1 B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92 4009438B 481C6CD7", "s != lasts: lasts = s n, na = n+na, na+8 d, da", "* pi()).to_integral_exact(ROUND_FLOOR) + 124476) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF", "'').replace(' ', '')), 'big') assert Decimal(p) - f == 0 # # The", "1, 0, 0, 24 while s != lasts: lasts = s n, na", "3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')),", "This group is assigned id 15. 
# # This prime is: 2^3072 -", "f = 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR)", "8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')),", "- 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 }", "F29BE328 06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE", "The 1536 bit MODP group has been used for the implementations for #", "D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120", "need some precision to evaluate PI getcontext().prec = 2000 # from the python", "# # Its hexadecimal value is: # p = \"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234", "# This prime is: 2^3072 - 2^3008 - 1 + 2^64 * {", "id 17. # # This prime is: 2^6144 - 2^6080 - 1 +", "DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C", "Group # # This group is assigned id 16. # # This prime", "0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8", "9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ',", "- 1 + 2^64 * { [2^2942 pi] + 1690314 } f =", "F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC", "((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904) # # Its hexadecimal value is: # p", "#!/usr/bin/env python3 from decimal import * from binascii import unhexlify # need some", "lasts: lasts = s n, na = n+na, na+8 d, da = d+da,", "to designate this group, we # standardize that practice here. # # The", "602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED", "DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76", "46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA CC8F6D7E BF48E1D8", "This prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918", "steps three = Decimal(3) # substitute \"three=3.0\" for regular floats lasts, t, s,", "8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED", "here. # # The prime is: 2^1536 - 2^1472 - 1 + 2^64", "670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 } f", "Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec += 2 #", "B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6", "F032EA15 D1721D03 F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7", "#4. 3072-bit MODP Group # # This group is assigned id 15. 
#", "C81F56E8 80B96E71 60C980DD 98EDD3DF FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "2**8128 - 1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158) # #", "F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2", "{ [2^8062 pi] + 4743158 } f = 2**8192 - 2**8128 - 1", "4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED", "was not defined in RFC 2409 (IKE). # Implementations have been using group", "The generator is: 2. #6. 6144-bit MODP Group # # This group is", "12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568", "929484) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234", "A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9", "from the python docs def pi(): \"\"\"Compute Pi to the current precision. >>>", "three, 3, 1, 0, 0, 24 while s != lasts: lasts = s", "2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314", "+ 240904) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2", "+ 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484) # # Its hexadecimal value", "regular floats lasts, t, s, n, na, d, da = 0, three, 3,", "2. #4. 3072-bit MODP Group # # This group is assigned id 15.", "pi()).to_integral_exact(ROUND_FLOOR) + 929484) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF", "ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1", "F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p)", "DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504", "2000 # from the python docs def pi(): \"\"\"Compute Pi to the current", "#7. 
8192-bit MODP Group # (needs higher precision) getcontext().prec = 4000 # #", "E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A", "B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p", "- 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476) # # Its", "} f = 2**2048 - 2**1984 - 1 + 2**64 * ((Decimal(2**1918) *", "hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74", "C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA", "44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE", "DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p =", "BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406 AD9E530E E5DB382F", "2**6144 - 2**6080 - 1 + 2**64 * ((Decimal(2**6014) * pi()).to_integral_exact(ROUND_FLOOR) + 929484)", "+ 2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal value is:", "1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF", "assigned id 15. # # This prime is: 2^3072 - 2^3008 - 1", "- 1 + 2^64 * { [2^6014 pi] + 929484 } f =", "the current precision. >>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec += 2 # extra digits", "pi()).to_integral_exact(ROUND_FLOOR) + 124476) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF", "3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801", "MODP Group # # The 1536 bit MODP group has been used for", "group is assigned id 16. # # This prime is: 2^4096 - 2^4032", "group is assigned id 14. # # This prime is: 2^2048 - 2^1984", "This prime is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062", "- 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 }", "return +s # unary plus applies the new precision ### The DH values", "Decimal(3) # substitute \"three=3.0\" for regular floats lasts, t, s, n, na, d,", "1 + 2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158) # # Its hexadecimal", "6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED 2576F693 6BA42466", "3AAB639C 5AE4F568 3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64", "4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2", "na = n+na, na+8 d, da = d+da, da+32 t = (t *", "0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF 8AFC47ED", "the implementations for # quite a long time, but was not defined in", "### The DH values defined in the RFC #2. 1536-bit MODP Group #", "da+32 t = (t * n) / d s += t getcontext().prec -=", "6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64", "179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF 5983CA01", "assert Decimal(p) - f == 0 # # The generator is: 2. 
#7.", "precision ### The DH values defined in the RFC #2. 1536-bit MODP Group", "- 1 + 2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal", "DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603", "three = Decimal(3) # substitute \"three=3.0\" for regular floats lasts, t, s, n,", "* ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal value is: # p", "DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n',", "DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p", "3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886", "pi] + 1690314 } f = 2**3072 - 2**3008 - 1 + 2**64", "240904) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234", "CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026", "E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p)", "1 + 2**64 * ((Decimal(2**1406) * pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal value", "2**4032 - 1 + 2**64 * ((Decimal(2**3966) * pi()).to_integral_exact(ROUND_FLOOR) + 240904) # #", "- 2**1984 - 1 + 2**64 * ((Decimal(2**1918) * pi()).to_integral_exact(ROUND_FLOOR) + 124476) #", "1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4", "+ 929484) # # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2", "1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14", "generator is: 2. #3. 2048-bit MODP Group # # This group is assigned", "is assigned id 14. # # This prime is: 2^2048 - 2^1984 -", "Decimal(p) - f == 0 # # The generator is: 2. #4. 3072-bit", "1536 bit MODP group has been used for the implementations for # quite", "MODP Group # # This group is assigned id 15. # # This", "EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33", "prime is: 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi]", "Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08", "t getcontext().prec -= 2 return +s # unary plus applies the new precision", "generator is: 2. #6. 6144-bit MODP Group # # This group is assigned", "359046F4 EB879F92 4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8", "# This prime is: 2^6144 - 2^6080 - 1 + 2^64 * {", "8192-bit MODP Group # (needs higher precision) getcontext().prec = 4000 # # This", "FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p) - f ==", "93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492 36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406", "B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB", "assert Decimal(p) - f == 0 # # The generator is: 2. 
#5.", "12BF2D5B 0B7474D6 E694F91E 6DCC4024 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big')", "4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')), 'big') assert Decimal(p)", "3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157", "2**3072 - 2**3008 - 1 + 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314)", "4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47 9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71 60C980DD", "2^64 * { [2^2942 pi] + 1690314 } f = 2**3072 - 2**3008", "Group # (needs higher precision) getcontext().prec = 4000 # # This group is", "3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ',", "- f == 0 # # The generator is: 2. #3. 2048-bit MODP", "prime is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi]", "group is assigned id 18. # # This prime is: 2^8192 - 2^8128", "* pi()).to_integral_exact(ROUND_FLOOR) + 741804) # Its hexadecimal value is: # p =\"\"\"FFFFFFFF FFFFFFFF", "4743158 } f = 2**8192 - 2**8128 - 1 + 2**64 * ((Decimal(2**8062)", "12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4 38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300 741FA7BF", "PI getcontext().prec = 2000 # from the python docs def pi(): \"\"\"Compute Pi", "p =\"\"\"FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD", "+ 2^64 * { [2^3966 pi] + 240904 } f = 2**4096 -", "f == 0 # # The generator is: 2. #6. 6144-bit MODP Group", "BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF\"\"\" p", "id 18. # # This prime is: 2^8192 - 2^8128 - 1 +", "ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757", "4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB", "7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace(' ', '')),", "generator is: 2. #4. 3072-bit MODP Group # # This group is assigned", "prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi]", "is: 2. #3. 2048-bit MODP Group # # This group is assigned id", "79683303 ED5BDD3A 062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36 4597E899 A0255DC1 64F31CC5 0846851D F9AB4819", "import unhexlify # need some precision to evaluate PI getcontext().prec = 2000 #", "E3FDB8BE FC848AD9 22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303", "+ 4743158 } f = 2**8192 - 2**8128 - 1 + 2**64 *", "we # standardize that practice here. 
# # The prime is: 2^1536 -", "208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF\"\"\" p = int.from_bytes(unhexlify(p.replace('\\n', '').replace('", "{ [2^2942 pi] + 1690314 } f = 2**3072 - 2**3008 - 1", "# # This prime is: 2^4096 - 2^4032 - 1 + 2^64 *", "2 return +s # unary plus applies the new precision ### The DH", "7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB", "2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970", "521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF", "1 + 2^64 * { [2^2942 pi] + 1690314 } f = 2**3072", "22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B 4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A 062B3CF5", "is assigned id 17. # # This prime is: 2^6144 - 2^6080 -", "is assigned id 18. # # This prime is: 2^8192 - 2^8128 -", "Group # # This group is assigned id 17. # # This prime", "2. #5. 4096-bit MODP Group # # This group is assigned id 16.", "2**64 * ((Decimal(2**8062) * pi()).to_integral_exact(ROUND_FLOOR) + 4743158) # # Its hexadecimal value is:", "but was not defined in RFC 2409 (IKE). # Implementations have been using", "== 0 # # The generator is: 2. #4. 3072-bit MODP Group #", "- 1 + 2**64 * ((Decimal(2**2942) * pi()).to_integral_exact(ROUND_FLOOR) + 1690314) # # Its", "This prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966", "d, da = d+da, da+32 t = (t * n) / d s", "514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6", "def pi(): \"\"\"Compute Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383 \"\"\" getcontext().prec" ]
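All six checks above instantiate one template: the top 64 and bottom 64 bits of each prime are forced to 1, the middle bits are the binary expansion of pi shifted into place (the `[2^(b-130) pi]` term), and a small per-group offset is added. A minimal consolidation sketch follows; the names `OFFSETS` and `modp_prime` are ours, not the script's, and it reuses the script's `pi()` while assuming `getcontext().prec` is still at the 4000 digits set for the 8192-bit group:

from decimal import Decimal, ROUND_FLOOR

# Per-group offsets, copied from the RFC 3526 comments in the script above.
OFFSETS = {1536: 741804, 2048: 124476, 3072: 1690314,
           4096: 240904, 6144: 929484, 8192: 4743158}

def modp_prime(bits):
    """Rebuild the RFC 3526 prime 2^b - 2^(b-64) - 1 + 2^64 * ([2^(b-130) pi] + k)."""
    middle = int((Decimal(2**(bits - 130)) * pi()).to_integral_exact(ROUND_FLOOR))
    return 2**bits - 2**(bits - 64) - 1 + 2**64 * (middle + OFFSETS[bits])

With this, each group's verification collapses to comparing `modp_prime(bits)` against the integer decoded from the corresponding hex block.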
[ "setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(),", "= \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(), include_package_data=True, install_requires=[\"opencv-python\", \"pyyaml\", \"imutils\"],", "<reponame>afterloe/opencv-practice #!/usr/bin/env python3 # -*- coding=utf-8 -*- from setuptools import setup PROJECT_NAME =", "\"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(), include_package_data=True, install_requires=[\"opencv-python\", \"pyyaml\", \"imutils\"], )", "-*- coding=utf-8 -*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\"", "setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(), include_package_data=True, install_requires=[\"opencv-python\",", "python3 # -*- coding=utf-8 -*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION", "#!/usr/bin/env python3 # -*- coding=utf-8 -*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\"", "from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION,", "-*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME,", "# -*- coding=utf-8 -*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION =", "coding=utf-8 -*- from setuptools import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup(", "import setup PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(), include_package_data=True,", "PROJECT_NAME = \"meter_reader_4_pointer\" VERSION = \"1.2.0\" setup( name=PROJECT_NAME, version=VERSION, packages=find_packages(), include_package_data=True, install_requires=[\"opencv-python\", \"pyyaml\"," ]
[ "setuptools import setup exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet", "from setuptools import setup exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT',", "<reponame>pingf/yadashcomp<filename>setup.py from setuptools import setup exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True,", "(open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet another dash components', install_requires=[]", "setup exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet another dash", "setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet another dash components', install_requires=[] )", "import setup exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet another", "exec (open('yadashcomp/version.py').read()) setup( name='yadashcomp', version=__version__, author='pingf', packages=['yadashcomp'], include_package_data=True, license='MIT', description='yet another dash components'," ]
[ "camera='1' bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for", "in fileList: if len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame)", "fileList: if len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName", "len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/'", "s3 = boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for fname in fileList:", "== len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' +", "subdirList, fileList in os.walk(path): for fname in fileList: if len(path) == len(dirName): finame=fname", "for fname in fileList: if len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:],", "path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for fname in", "in os.walk(path): for fname in fileList: if len(path) == len(dirName): finame=fname else: finame", "= boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for fname in fileList: if", "fileList in os.walk(path): for fname in fileList: if len(path) == len(dirName): finame=fname else:", "fname in fileList: if len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname)", "os camera='1' bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path):", "else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame)", "print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print res os.unlink(dirName + '/'", "bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for fname", "if len(path) == len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName +", "import os camera='1' bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList in", "import boto3 import os camera='1' bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList,", "len(dirName): finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname,", "'%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print res os.unlink(dirName", "res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print res os.unlink(dirName + '/' +", "= '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print res", "dirName, subdirList, fileList in os.walk(path): for fname in fileList: if len(path) == len(dirName):", "os.walk(path): for fname in fileList: if len(path) == len(dirName): finame=fname else: finame =", "for dirName, subdirList, fileList in os.walk(path): for fname in fileList: if len(path) ==", "finame = '%s/%s'%(dirName[len(path)+1:], fname) 
print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print", "finame=fname else: finame = '%s/%s'%(dirName[len(path)+1:], fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket,", "fname) print(finame) res=s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame) print res os.unlink(dirName +", "+ '/' + fname, bucket, finame) print res os.unlink(dirName + '/' + fname)", "boto3.resource('s3') for dirName, subdirList, fileList in os.walk(path): for fname in fileList: if len(path)", "boto3 import os camera='1' bucket='mikey.com-security' path='/mnt/cameraimages/images' s3 = boto3.resource('s3') for dirName, subdirList, fileList" ]
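# The script above deletes each file immediately after its upload call
# returns. Since upload_file raises on failure, a failed transfer leaves the
# local copy in place for the next run. A sketch of making that explicit
# (not part of the original script; the broad except is deliberate because
# boto3 wraps transfer errors in its own exception types):
#
#     try:
#         s3.meta.client.upload_file(dirName + '/' + fname, bucket, finame)
#     except Exception as err:
#         print("upload failed, keeping local copy:", err)
#     else:
#         os.unlink(dirName + '/' + fname)  # delete only after success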
[ "a dict # of IMG_id -> distance, # and we include the list", "dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1, cont_1] # else: # raise", "= curr_dist_compl[key] # if compl_1 == compl_2 and cont_1 == cont_2: # #", "g['upa'].tolist() ss = {} for upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] =", "and staging some of the outputs for the templates. \"\"\" if len(query_results) >", "val in mag_dict.items()} cont = {mag:val[2] for mag, val in mag_dict.items()} else: dist,", "pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key in dist_compl: for mag_key in", "have dists as a dict # of IMG_id -> distance, # and we", "= ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name'] if len(upas) ==", "'truncated_name': str(trunc_name), 'name' : t, 'count': \"({})\".format(len(dist)) }) if source_order!=None: tree[-1]['dist'] = dist", "tree_cols[0] type_count = GOLD[col].value_counts().to_dict() for t in type_count: # if len(t) > name_max_len:", "'objects': [{'ref':upa} for upa in upas] }) upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for", "Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD", "# dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1, cont_1] # else: #", "key in dist_compl: for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] =", "# raise ValueError('Same project ids but contamination and/or completeness do not match') #", "source:\"+source return markers def unwind_tree(X, tree): \"\"\" \"\"\" if tree.get('children'): for t in", "curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project", "TEMPORARY MARKER SET UP markers = get_location_markers(set([s['mag_id'] for s in stats])) return stats,", "m['source'] = \"Input source:\"+source return markers def unwind_tree(X, tree): \"\"\" \"\"\" if tree.get('children'):", "in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids for i, s in enumerate(source_order):", "tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas) sources = [0 for _ in range(len(upa_names))]", "mag, val in mag_dict.items()} else: dist, compl, cont = \"\", \"\", \"\" print(\"-\"*90)", "upa_name = upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in", "37.817060, \"lng\": -122.478206, \"details\":\"This is the Golden Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng':", "markers: m['source'] = \"Input source:\"+source return markers def unwind_tree(X, tree): \"\"\" \"\"\" if", "s in ss: sources.append(ss[upa]) # sources[i] = ss[upa] else: sources.append([]) else: source_count =", "upa_to_name missing_upas = list(set(upas) - set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if", "print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID '] #", "= [s for s in stats if s['completeness'] >= min_completeness] if max_contamination: stats", "upa print(\"curr gold cols 3:\",curr_GOLD.columns) # We want to get a row for", "each img id stats += curr_stats # group them by img_ids curr_GOLD.set_index('IMG Genome", 
"MARKER SET UP markers = get_location_markers(set([s['mag_id'] for s in stats])) return stats, upa_names,", "curr_dist_compl[key] upa_names.append(upa_name) all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific", "for col in tree_cols}) print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name)", "name of marker 'lat': latitude as a float 'lng': longitude as a float", "in type_count: # if len(t) > name_max_len: # name = t[:name_max_len] + '...'", "details } ''' markers = [ {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley", "leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order) if leaf == []: if col ==", "curr_dist_compl: if key in dist_compl: for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else:", "for key, val in gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in", "statistics from the GOLD and statitics csvs ids: GOLD: ''' output = []", "sources return tree def get_location_markers(ids, source=None): ''' For now this simply returns 1", "# if kb_id: # curr['kb_id'] = kb_id # else: # curr['kb_id'] = ''", "Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name'] if len(upas) == 1: tree", "tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance: stats = [s", "'/'.join([str(info[6]), str(info[0]), str(info[4])]) upa_to_name[upa] = info[1] return upa_to_name def get_statistics(ids, GOLD, upa_name=None): '''", "{mag:val[0] for mag, val in mag_dict.items()} compl = {mag:val[1] for mag, val in", "tree.get('children'): for t in tree['children']: if 'compl' in t: X.append(np.array([len(mag_ids) for mag_ids in", "i, cs in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:]", "in stats: if s['project'] not in dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] = [round(s['dist'],", "== \"Project / Study Name\": mag_dict = dist_compl[t] dist = {mag:val[0] for mag,", "to see distance dictionary # unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys()))) # for uinc_key", "in dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # dist_compl[s['project']]", "= sorted(stats, key=lambda s: s['dist']) if len(stats) > n_max_results: stats = stats[:n_max_results] dist_compl", "id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_) curr['IMG_link'] = img_link if relatedids:", "gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names = [] upas = [] dist_compl", "in curr_stats])] curr_GOLD['upa'] = upa print(\"curr gold cols 3:\",curr_GOLD.columns) # We want to", "for s in stats if s['contamination'] <= max_contamination] stats = sorted(stats, key=lambda s:", "/ Study Name\"] == t].iloc[0]['IMG Genome ID '] # is terminal node/actually a", "ID '] # is terminal node/actually a leaf # here we change the", "[] upas = [] dist_compl = {} all_GOLD = [] # 
id_to_inputs =", "upa in query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID']", "cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\" Here we do a combiantion of", "round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping the items:\",s,", "= defaultdict(lambda:[]) stats = [] for upa in query_results: upas.append(upa) upa_name = upa_to_name[upa]", "curr['dist'] = dist # if kb_id: # curr['kb_id'] = kb_id # else: #", "for t in tree['children']: if 'compl' in t: X.append(np.array([len(mag_ids) for mag_ids in t['sources']]))", "in stats])) return stats, upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination):", "installed_clients.DataFileUtilClient import DataFileUtil import numpy as np import pandas as pd import os", "gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: []) for i, cs in enumerate(curr_stats): img_id", "csvs ids: GOLD: ''' output = [] currdir = os.path.dirname(__file__) stats_path = os.path.join(currdir,", "dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping the items:\",s, dist_compl[s['project']]) # if", "\"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({ 'objects': [{'ref':upa} for upa in upas]", "tree['sources'] = remap_sources(tree['sources'], upa_order) tree = rewind_tree(tree, upa_order) new_upa_names = [] for i", "find all input names. len upas: %s len objs: %s\"%(len(upas), len(objs)), upas, [obj['info']", "Study Name'] if len(upas) == 1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count =", "''' markers = [ {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley National Laboratory.\"},", "for upa in upas] }) upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info in", "filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance: stats = [s for s in", "= curr_dist_compl[key] upa_names.append(upa_name) all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem", "is terminal node/actually a leaf # here we change the terminal nodes to", "in curr_dist_compl: if key in dist_compl: for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key]", "= os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats = pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())]", "name = t[:name_max_len] + '...' 
# else: # name = t count =", "= leaves_list(z) return upa_order def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\"", "= create_tree(all_GOLD, tree_cols, dist_compl) count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]])", "= {mag:val[0] for mag, val in mag_dict.items()} compl = {mag:val[1] for mag, val", "{ 'name': name of marker 'lat': latitude as a float 'lng': longitude as", "= DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise ValueError(\"Could not find", "in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else: tree = create_tree(all_GOLD,", "# curr['kb_id'] = '' id_stats = curr_stats[curr_stats.binid == id_] curr['completeness'] = id_stats.iloc[0]['completeness'] curr['contamination']", "= id_stats.iloc[0]['MIMAG'] curr['mag_id'] = id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_) curr['IMG_link']", "in mag_dict.items()} compl = {mag:val[1] for mag, val in mag_dict.items()} cont = {mag:val[2]", "= [] for upa in query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD", "{'name':\"Golden Gate Bridge\", \"lat\": 37.817060, \"lng\": -122.478206, \"details\":\"This is the Golden Gate Bridge.\"},", "upa_order) tree['children'][t_ix] = t return tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X", "dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for s in stats} return", "37.881523, \"lng\": -121.914325, \"details\":\"This is <NAME>.\"} ] if source!= None: for m in", "round(s['completeness'],2), round(s['contamination'],2)] # dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'],", "= [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2)", "sources[i] if val != 0 and val != []: new_sources[j] = val return", "we only have a row for each img id stats += curr_stats #", "for s in stats} return stats, dist_compl def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\"", "= info[1] return upa_to_name def get_statistics(ids, GOLD, upa_name=None): ''' get statistics from the", "upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids for i, s in", "= \"({})\".format(type_count[t]) leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order) if leaf == []: if", "'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523, \"lng\": -121.914325,", "upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids for i, s in enumerate(source_order): if", "is Lawrence Berkeley National Laboratory.\"}, {'name':\"Golden Gate Bridge\", \"lat\": 37.817060, \"lng\": -122.478206, \"details\":\"This", "in range(len(upa_names)): for t in tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree = {\"truncated_name\":\"\",", "tree_cols, dist_compl, source_order=upas) sources = [0 for _ in range(len(upa_names))] for i in", "Here we do 
a combiantion of getting all the relevant statistics from the", "gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key, val in gold_info.iteritems():", "max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])]", "pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name']", "return stats, dist_compl def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url) objs", "', inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: []) for i, cs", "Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523, \"lng\": -121.914325, \"details\":\"This is <NAME>.\"} ] if", "of markers ids: list of ids marker format: { 'name': name of marker", "= dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise ValueError(\"Could not find all input names.", "= source_count[s] else: sources.append(0) tree[-1]['sources'] = sources return tree def get_location_markers(ids, source=None): '''", "= GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID '] # is terminal", "mag_dict.items()} cont = {mag:val[2] for mag, val in mag_dict.items()} else: dist, compl, cont", "list of ids marker format: { 'name': name of marker 'lat': latitude as", "cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for", "GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])] tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem", "ids but contamination and/or completeness do not match',\\ # round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2),", "= children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if source_order!=None: sources = []", "raise ValueError(\"Could not find all input names. len upas: %s len objs: %s\"%(len(upas),", "first:\",X) X = np.transpose(np.array(X)) print(\"je suis here:\",X) print('-'*80) z = linkage(X, 'ward') upa_order", "mag id in curr_GOLD, # right now we only have a row for", "1 marker with the location of LBL. Returns list of markers ids: list", "markers def unwind_tree(X, tree): \"\"\" \"\"\" if tree.get('children'): for t in tree['children']: if", "t in enumerate(tree['children']): new_sources = remap_sources(t['sources'], upa_order) t['sources'] = new_sources if t.get('children'): t", "> name_max_len: # name = t[:name_max_len] + '...' 
# else: # name =", "in range(len(upa_names))] for i in range(len(upa_names)): for t in tree: sources[i]+=t['sources'][i] total_num =", "dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: #", "= {} child['truncated_name'] = key child['count'] = '' child['dist'] = val children.append(child) tree[-1]['children']", "if s in source_count: sources.append(source_count[s]) # sources[i] = source_count[s] else: sources.append(0) tree[-1]['sources'] =", "\"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley National Laboratory.\"}, {'name':\"Golden Gate Bridge\", \"lat\": 37.817060,", "gold cols 3:\",curr_GOLD.columns) # We want to get a row for each mag", "\"({})\".format(len(dist)) }) if source_order!=None: tree[-1]['dist'] = dist tree[-1]['compl'] = compl tree[-1]['cont'] = cont", "compl_2 and cont_1 == cont_2: # # check to see distance dictionary #", "= curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa'] = upa print(\"curr", "relatedids = ids[id_] if upa_name != None: curr['input_name'] = upa_name curr['dist'] = dist", "upa_order def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\" Here we do", "all_GOLD = [] # id_to_inputs = defaultdict(lambda:[]) stats = [] for upa in", "get_location_markers(set([s['mag_id'] for s in stats])) return stats, upa_names, tree, markers def filter_stats(stats, n_max_results,", "{} for s in stats: if s['project'] not in dist_compl: dist_compl[s['project']] = {}", "[] if leaf == []: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss =", "else: # raise ValueError('same project ids but contamination and/or completeness do not match',\\", "in stats if s['dist'] <= max_distance] if min_completeness: stats = [s for s", "len(upa_to_name)==len(upas): return upa_to_name missing_upas = list(set(upas) - set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs =", "\"\"\" \"\"\" for t_ix, t in enumerate(tree['children']): new_sources = remap_sources(t['sources'], upa_order) t['sources'] =", "max_distance, min_completeness, max_contamination): \"\"\" Here we do a combiantion of getting all the", "= remap_sources(tree['sources'], upa_order) tree = rewind_tree(tree, upa_order) new_upa_names = [] for i in", "for s in stats if s['completeness'] >= min_completeness] if max_contamination: stats = [s", "<= max_contamination] stats = sorted(stats, key=lambda s: s['dist']) if len(stats) > n_max_results: stats", "get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({ 'objects': [{'ref':upa}", "new_gold = defaultdict(lambda: []) for i, cs in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id", "if key in dist_compl: for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key]", "upa_to_name def get_statistics(ids, GOLD, upa_name=None): ''' get statistics from the GOLD and statitics", "stats = [s for s in stats if s['dist'] <= max_distance] if min_completeness:", "in tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree}", "= [] if len(tree_cols) == 0: return tree col = tree_cols[0] type_count =", "# dist_1, compl_1, cont_1 = dist_compl[key] # 
dist_2, compl_2, cont_2 = curr_dist_compl[key] #", "combiantion of getting all the relevant statistics from the data csv, filtering the", "\"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'], upa_order) tree = rewind_tree(tree, upa_order)", "children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if source_order!=None: sources", "ID ', inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: []) for i,", "gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results,", "/ Study Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in", "= [0 for _ in range(len(upa_names))] for i in range(len(upa_names)): for t in", "obj in objs: info = obj['info'] upa = '/'.join([str(info[6]), str(info[0]), str(info[4])]) upa_to_name[upa] =", "print(\"curr gold cols 3:\",curr_GOLD.columns) # We want to get a row for each", "source_order!=None: tree[-1]['dist'] = dist tree[-1]['compl'] = compl tree[-1]['cont'] = cont else: children =", "{} for j, i in enumerate(upa_order): val = sources[i] if val != 0", "type_count = GOLD[col].value_counts().to_dict() for t in type_count: # if len(t) > name_max_len: #", "'ward') upa_order = leaves_list(z) return upa_order def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness,", "dist_compl = {} all_GOLD = [] # id_to_inputs = defaultdict(lambda:[]) stats = []", "compl_1 == compl_2 and cont_1 == cont_2: # # check to see distance", "round(s['contamination'],2)] # dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3),", "key, val in query_results[upa].items()])] tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project /", "= np.transpose(np.array(X)) print(\"je suis here:\",X) print('-'*80) z = linkage(X, 'ward') upa_order = leaves_list(z)", "'Ecosystem Type','Specific Ecosystem','Project / Study Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\"", "dist_compl) count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\",", "Stats = pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for id_ in ids:", "# round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2),", "print(\"mapping the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]:", "to the provided inputs, and staging some of the outputs for the templates.", "ids: curr = {} dist, kb_id, relatedids = ids[id_] if upa_name != None:", "want to get a row for each mag id in curr_GOLD, # right", "# else: # curr['kb_id'] = '' id_stats = curr_stats[curr_stats.binid == id_] curr['completeness'] =", "'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / 
Study", "child['count'] = '' child['dist'] = val children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t,", "= upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])]", "contamination and/or completeness do not match',\\ # round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2]) #", "2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness,", "# group them by img_ids curr_GOLD.set_index('IMG Genome ID ', inplace=True) print(\"curr gold cols", "'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523, \"lng\": -121.914325, \"details\":\"This is", "dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1, compl_1, cont_1 = dist_compl[key]", "'children':leaf }) if source_order!=None: sources = [] if leaf == []: g =", "print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats,", "id_) curr['IMG_link'] = img_link if relatedids: for key in relatedids: if relatedids[key]: curr[key]", "ids but contamination and/or completeness do not match') # # id_to_inputs[key].append(upa_name) # else:", "tree['children'][t_ix] = t return tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X =", "terminal node/actually a leaf # here we change the terminal nodes to have", "set(list(dist_1.keys()))) # for uinc_key in unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] =", "mag_dict.items()} else: dist, compl, cont = \"\", \"\", \"\" print(\"-\"*90) print('project name:',t) print(\"gold", "cs in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id)", "relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name'] else:", "marker with the location of LBL. Returns list of markers ids: list of", "# if len(t) > name_max_len: # name = t[:name_max_len] + '...' 
# else:", "for each img id stats += curr_stats # group them by img_ids curr_GOLD.set_index('IMG", "= cont else: children = [] for key, val in dist.items(): child =", "if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3))", "# round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for", "leaf # here we change the terminal nodes to have dists as a", "and val != []: new_sources[j] = val return new_sources def rewind_tree(tree, upa_order): \"\"\"", "leaf == []: if col == \"Project / Study Name\": mag_dict = dist_compl[t]", "'count': \"({})\".format(len(dist)) }) if source_order!=None: tree[-1]['dist'] = dist tree[-1]['compl'] = compl tree[-1]['cont'] =", "gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key in", "def rewind_tree(tree, upa_order): \"\"\" \"\"\" for t_ix, t in enumerate(tree['children']): new_sources = remap_sources(t['sources'],", "import linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\" tree = []", "get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X = unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here", "new_sources if t.get('children'): t = rewind_tree(t, upa_order) tree['children'][t_ix] = t return tree def", "cont = \"\", \"\", \"\" print(\"-\"*90) print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0])", "a combiantion of getting all the relevant statistics from the data csv, filtering", "upa_names = new_upa_names # TEMPORARY MARKER SET UP markers = get_location_markers(set([s['mag_id'] for s", "return new_sources def rewind_tree(tree, upa_order): \"\"\" \"\"\" for t_ix, t in enumerate(tree['children']): new_sources", "def get_statistics(ids, GOLD, upa_name=None): ''' get statistics from the GOLD and statitics csvs", "\"\"\" \"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({ 'objects': [{'ref':upa} for upa in", "if 'compl' in t: X.append(np.array([len(mag_ids) for mag_ids in t['sources']])) else: X.append(np.array(t['sources'])) X =", "remap_sources(t['sources'], upa_order) t['sources'] = new_sources if t.get('children'): t = rewind_tree(t, upa_order) tree['children'][t_ix] =", "have a row for each img id stats += curr_stats # group them", "''' get statistics from the GOLD and statitics csvs ids: GOLD: ''' output", "curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD", "curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols}) print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD,", "all the relevant statistics from the data csv, filtering the outputs according to", "def create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\" tree = [] if len(tree_cols) ==", "curr['IMG_link'] = img_link if relatedids: for key in relatedids: if relatedids[key]: curr[key] =", "Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name'] else: curr['project'] = 'Unknown' output.append(curr) return", "installed_clients.WorkspaceClient import Workspace from installed_clients.DataFileUtilClient import 
DataFileUtil import numpy as np import pandas", "int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else:", "else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if source_order!=None: sources = [] if leaf", "X = unwind_tree(X, t) return X def remap_sources(sources, upa_order): new_sources = {} for", "pd.read_csv(gold_path) upa_names = [] upas = [] dist_compl = {} all_GOLD = []", "missing_upas = list(set(upas) - set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs)", "dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1, cont_1] # else: # raise ValueError('Same project", "== cont_2: # # check to see distance dictionary # unincluded_keys = list(set(list(dist_2.keys()))", "= \"\", \"\", \"\" print(\"-\"*90) print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90)", "dictionary # unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys()))) # for uinc_key in unincluded_keys: #", "!= len(missing_upas): raise ValueError(\"Could not find all input names. len upas: %s len", "node/actually a leaf # here we change the terminal nodes to have dists", "For now this simply returns 1 marker with the location of LBL. Returns", "'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523, \"lng\":", "cont_1 = dist_compl[key] # dist_2, compl_2, cont_2 = curr_dist_compl[key] # if compl_1 ==", "linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\" tree = [] if", "dist_compl, source_order=None): \"\"\" \"\"\" tree = [] if len(tree_cols) == 0: return tree", "[] for key, val in dist.items(): child = {} child['truncated_name'] = key child['count']", "tree): \"\"\" \"\"\" if tree.get('children'): for t in tree['children']: if 'compl' in t:", "curr_dist_compl[key] # dist_1, compl_1, cont_1 = dist_compl[key] # dist_2, compl_2, cont_2 = curr_dist_compl[key]", "of marker 'lat': latitude as a float 'lng': longitude as a float 'details':", "in relatedids: if relatedids[key]: curr[key] = relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']:", "print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project / Study", "Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])] tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem", "img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID", "ss[upa] = mag_ids for i, s in enumerate(source_order): if s in ss: sources.append(ss[upa])", "max_distance: stats = [s for s in stats if s['dist'] <= max_distance] if", "and statitics csvs ids: GOLD: ''' output = [] currdir = os.path.dirname(__file__) stats_path", "from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\" tree", "= cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id)", "in ids: curr = {} dist, kb_id, relatedids = ids[id_] if upa_name !=", "if relatedids[key]: curr[key] = 
relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] =", "t in type_count: # if len(t) > name_max_len: # name = t[:name_max_len] +", "according to the provided inputs, and staging some of the outputs for the", "col in tree_cols}) print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats,", "print(\"je suis here:\",X) print('-'*80) z = linkage(X, 'ward') upa_order = leaves_list(z) return upa_order", "if min_completeness: stats = [s for s in stats if s['completeness'] >= min_completeness]", "-121.914325, \"details\":\"This is <NAME>.\"} ] if source!= None: for m in markers: m['source']", "a row for each mag id in curr_GOLD, # right now we only", "Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID", "min_completeness: stats = [s for s in stats if s['completeness'] >= min_completeness] if", "leaves_list(z) return upa_order def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\" Here", "for mag, val in mag_dict.items()} compl = {mag:val[1] for mag, val in mag_dict.items()}", "curr_GOLD, # right now we only have a row for each img id", ">= min_completeness] if max_contamination: stats = [s for s in stats if s['contamination']", "import pandas as pd import os from collections import defaultdict from scipy.cluster.hierarchy import", "'Ecosystem Type','Specific Ecosystem','Project / Study Name'] if len(upas) == 1: tree = create_tree(all_GOLD,", "row for each mag id in curr_GOLD, # right now we only have", "and round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: # raise", "= dist_compl[t] dist = {mag:val[0] for mag, val in mag_dict.items()} compl = {mag:val[1]", "dists as a dict # of IMG_id -> distance, # and we include", "sorted(stats, key=lambda s: s['dist']) if len(stats) > n_max_results: stats = stats[:n_max_results] dist_compl =", "Golden Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International", "sources[i] = source_count[s] else: sources.append(0) tree[-1]['sources'] = sources return tree def get_location_markers(ids, source=None):", "for obj in objs: info = obj['info'] upa = '/'.join([str(info[6]), str(info[0]), str(info[4])]) upa_to_name[upa]", "in gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key", "\"\"\" stats: \"\"\" X = unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here first:\",X) X", "GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID '] # is terminal node/actually", "= get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD", "all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project /", "dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # dist_compl[s['project']] =", "latitude as a float 'lng': longitude as a float 'details': pop up details", "%s\"%(len(upas), len(objs)), upas, 
[obj['info'] for obj in objs]) for obj in objs: info", "= {list(query_results.keys())[0]:\"\"} currdir = os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names =", "img id stats += curr_stats # group them by img_ids curr_GOLD.set_index('IMG Genome ID", "[] dist_compl = {} all_GOLD = [] # id_to_inputs = defaultdict(lambda:[]) stats =", "Genome ID '] # is terminal node/actually a leaf # here we change", "check to see distance dictionary # unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys()))) # for", "curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1, compl_1, cont_1 = dist_compl[key] # dist_2,", "curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa'] = upa", "'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'], upa_order) tree =", "new_sources = remap_sources(t['sources'], upa_order) t['sources'] = new_sources if t.get('children'): t = rewind_tree(t, upa_order)", "\"\"\" tree = [] if len(tree_cols) == 0: return tree col = tree_cols[0]", "mag, val in mag_dict.items()} cont = {mag:val[2] for mag, val in mag_dict.items()} else:", "val children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if source_order!=None:", "if s['completeness'] >= min_completeness] if max_contamination: stats = [s for s in stats", "s in stats} return stats, dist_compl def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws", "\"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_) curr['IMG_link'] = img_link if relatedids: for key in relatedids: if relatedids[key]:", "len objs: %s\"%(len(upas), len(objs)), upas, [obj['info'] for obj in objs]) for obj in", "'' id_stats = curr_stats[curr_stats.binid == id_] curr['completeness'] = id_stats.iloc[0]['completeness'] curr['contamination'] = id_stats.iloc[0]['contamination'] curr['MIMAG']", "ValueError(\"Could not find all input names. 
len upas: %s len objs: %s\"%(len(upas), len(objs)),", "source=None): ''' For now this simply returns 1 marker with the location of", "from collections import defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl,", "else: # dist_compl[key] = curr_dist_compl[key] upa_names.append(upa_name) all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem", "tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources'] =", "output = [] currdir = os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats =", "suis here first:\",X) X = np.transpose(np.array(X)) print(\"je suis here:\",X) print('-'*80) z = linkage(X,", "and/or completeness do not match',\\ # round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl", "= {} dist, kb_id, relatedids = ids[id_] if upa_name != None: curr['input_name'] =", "1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"} currdir = os.path.dirname(__file__)", "[]: if col == \"Project / Study Name\": mag_dict = dist_compl[t] dist =", "as a dict # of IMG_id -> distance, # and we include the", "= filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for", "if tree.get('children'): for t in tree['children']: if 'compl' in t: X.append(np.array([len(mag_ids) for mag_ids", "''' output = [] currdir = os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats", "{'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley National Laboratory.\"}, {'name':\"Golden Gate Bridge\", \"lat\":", "key, val in dist.items(): child = {} child['truncated_name'] = key child['count'] = ''", "outputs according to the provided inputs, and staging some of the outputs for", "a leaf # here we change the terminal nodes to have dists as", "Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"},", "in enumerate(tree['children']): new_sources = remap_sources(t['sources'], upa_order) t['sources'] = new_sources if t.get('children'): t =", "import os from collections import defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD,", "if s['contamination'] <= max_contamination] stats = sorted(stats, key=lambda s: s['dist']) if len(stats) >", "tree_cols, dist_compl) count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree =", "if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name']", "= [ {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley National Laboratory.\"}, {'name':\"Golden Gate", "> n_max_results: stats = stats[:n_max_results] dist_compl = {} for s in stats: if", "= upa_name curr['dist'] = dist # if kb_id: # curr['kb_id'] = kb_id #", "j, i in enumerate(upa_order): val = sources[i] if val != 0 and val", "[obj['info'] for obj in objs]) for obj in objs: info = obj['info'] upa", "the provided inputs, and staging 
some of the outputs for the templates. \"\"\"", "get a row for each mag id in curr_GOLD, # right now we", "distance, # and we include the list of img_id's for each tree.append({ 'truncated_name':", "\"Project / Study Name\": mag_dict = dist_compl[t] dist = {mag:val[0] for mag, val", "dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise ValueError(\"Could not", "'count':count, 'children':leaf }) if source_order!=None: sources = [] if leaf == []: g", "tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order", "in upa_order: new_upa_names.append(upa_names[i]) upa_names = new_upa_names # TEMPORARY MARKER SET UP markers =", "if leaf == []: if col == \"Project / Study Name\": mag_dict =", "input names. len upas: %s len objs: %s\"%(len(upas), len(objs)), upas, [obj['info'] for obj", "in dist_compl: for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key]", "len(tree_cols) == 0: return tree col = tree_cols[0] type_count = GOLD[col].value_counts().to_dict() for t", "leaf == []: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss = {} for", "# dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: # raise ValueError('same project ids but", "t = rewind_tree(t, upa_order) tree['children'][t_ix] = t return tree def get_source_order(tree, upa_names): \"\"\"", "s in source_count: sources.append(source_count[s]) # sources[i] = source_count[s] else: sources.append(0) tree[-1]['sources'] = sources", "pop up details } ''' markers = [ {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is", "= (round(s['dist'], 3)) # else: # raise ValueError('same project ids but contamination and/or", "for uinc_key in unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1,", "else: sources.append([]) else: source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict() for i, s in enumerate(source_order): if s", "cont_2: # # check to see distance dictionary # unincluded_keys = list(set(list(dist_2.keys())) -", "max_contamination): \"\"\" Here we do a combiantion of getting all the relevant statistics", "list(set(upas) - set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas):", "return upa_to_name def get_statistics(ids, GOLD, upa_name=None): ''' get statistics from the GOLD and", "= id_stats.iloc[0]['contamination'] curr['MIMAG'] = id_stats.iloc[0]['MIMAG'] curr['mag_id'] = id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link =", "dist_2, compl_2, cont_2 = curr_dist_compl[key] # if compl_1 == compl_2 and cont_1 ==", "statistics from the data csv, filtering the outputs according to the provided inputs,", "curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project", "Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\",", "i in range(len(upa_names)): for t in tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree =", "source_count[s] else: sources.append(0) tree[-1]['sources'] = sources return tree def 
get_location_markers(ids, source=None): ''' For", "id_] curr['completeness'] = id_stats.iloc[0]['completeness'] curr['contamination'] = id_stats.iloc[0]['contamination'] curr['MIMAG'] = id_stats.iloc[0]['MIMAG'] curr['mag_id'] = id_", "upa_order): new_sources = {} for j, i in enumerate(upa_order): val = sources[i] if", "curr_GOLD['upa'] = upa print(\"curr gold cols 3:\",curr_GOLD.columns) # We want to get a", "we change the terminal nodes to have dists as a dict # of", "s in stats])) return stats, upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness,", "in t: X.append(np.array([len(mag_ids) for mag_ids in t['sources']])) else: X.append(np.array(t['sources'])) X = unwind_tree(X, t)", "= [dist_1, compl_1, cont_1] # else: # raise ValueError('Same project ids but contamination", "= [] # id_to_inputs = defaultdict(lambda:[]) stats = [] for upa in query_results:", "t['sources']])) else: X.append(np.array(t['sources'])) X = unwind_tree(X, t) return X def remap_sources(sources, upa_order): new_sources", "in mag_dict.items()} cont = {mag:val[2] for mag, val in mag_dict.items()} else: dist, compl,", "= GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])] tree_cols = ['Ecosystem','Ecosystem", "# dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for s in stats}", "row for each img id stats += curr_stats # group them by img_ids", "create_tree(all_GOLD, tree_cols, dist_compl) count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree", "= [] dist_compl = {} all_GOLD = [] # id_to_inputs = defaultdict(lambda:[]) stats", "# of IMG_id -> distance, # and we include the list of img_id's", "X.append(np.array([len(mag_ids) for mag_ids in t['sources']])) else: X.append(np.array(t['sources'])) X = unwind_tree(X, t) return X", "the GOLD and statitics csvs ids: GOLD: ''' output = [] currdir =", "if col == \"Project / Study Name\": mag_dict = dist_compl[t] dist = {mag:val[0]", "return upa_order def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\" Here we", "curr['MIMAG'] = id_stats.iloc[0]['MIMAG'] curr['mag_id'] = id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_)", "# right now we only have a row for each img id stats", "objs = ws.get_object_info3({ 'objects': [{'ref':upa} for upa in upas] }) upa_to_name = {'/'.join([str(info[6]),", "- set(list(dist_1.keys()))) # for uinc_key in unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key]", "dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2))", "ss = {} for upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids", "t in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else: tree =", "curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID']", "not in dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 
3), round(s['completeness'],2), round(s['contamination'],2)] #", "markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance: stats = [s for", "= val children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if", "= [s for s in stats if s['contamination'] <= max_contamination] stats = sorted(stats,", "for upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids for i, s", "str(info[4])]):info[1] for info in objs['infos']} if len(upa_to_name)==len(upas): return upa_to_name missing_upas = list(set(upas) -", "cont_1] # else: # raise ValueError('Same project ids but contamination and/or completeness do", "\"\"\" if len(query_results) > 1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name =", "= pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study", "\"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else: tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas) sources = [0", "curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name'] else: curr['project']", "curr[key] = relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis", "rewind_tree(t, upa_order) tree['children'][t_ix] = t return tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\"", "completeness do not match',\\ # round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl =", "= relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project", "t_ix, t in enumerate(tree['children']): new_sources = remap_sources(t['sources'], upa_order) t['sources'] = new_sources if t.get('children'):", "each mag id in curr_GOLD, # right now we only have a row", "get statistics from the GOLD and statitics csvs ids: GOLD: ''' output =", "t) return X def remap_sources(sources, upa_order): new_sources = {} for j, i in", "s in curr_stats])] curr_GOLD['upa'] = upa print(\"curr gold cols 3:\",curr_GOLD.columns) # We want", "suis here:\",X) print('-'*80) z = linkage(X, 'ward') upa_order = leaves_list(z) return upa_order def", "if len(t) > name_max_len: # name = t[:name_max_len] + '...' 
# else: #", "t in tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources,", "set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise ValueError(\"Could", "cont = {mag:val[2] for mag, val in mag_dict.items()} else: dist, compl, cont =", "curr_stats # group them by img_ids curr_GOLD.set_index('IMG Genome ID ', inplace=True) print(\"curr gold", "= t return tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X = unwind_tree([tree['sources']],", "[{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping", "3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping the", "for j, i in enumerate(upa_order): val = sources[i] if val != 0 and", "= new_upa_names # TEMPORARY MARKER SET UP markers = get_location_markers(set([s['mag_id'] for s in", "s['project'] not in dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)]", "# sources[i] = ss[upa] else: sources.append([]) else: source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict() for i, s", "{} child['truncated_name'] = key child['count'] = '' child['dist'] = val children.append(child) tree[-1]['children'] =", "sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources']", "Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa'] = upa print(\"curr gold cols 3:\",curr_GOLD.columns)", "# here we change the terminal nodes to have dists as a dict", "up details } ''' markers = [ {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence", "name_max_len: # name = t[:name_max_len] + '...' # else: # name = t", "stats[:n_max_results] dist_compl = {} for s in stats: if s['project'] not in dist_compl:", "GOLD = pd.read_csv(gold_path) upa_names = [] upas = [] dist_compl = {} all_GOLD", "the relevant statistics from the data csv, filtering the outputs according to the", "[round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']]", "n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in", "inputs, and staging some of the outputs for the templates. 
\"\"\" if len(query_results)", "== 1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count = sum([ int(t['count'][1:-1]) for t", "<= max_distance] if min_completeness: stats = [s for s in stats if s['completeness']", "# dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)] else: dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2),", "curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for id_ in ids: curr = {}", "the terminal nodes to have dists as a dict # of IMG_id ->", "new_upa_names = [] for i in upa_order: new_upa_names.append(upa_names[i]) upa_names = new_upa_names # TEMPORARY", "[{'ref':upa} for upa in upas] }) upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info", "curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1, compl_1, cont_1 =", "= unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here first:\",X) X = np.transpose(np.array(X)) print(\"je suis", "here we change the terminal nodes to have dists as a dict #", "'lat': latitude as a float 'lng': longitude as a float 'details': pop up", "Ecosystem','Project / Study Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col", "for i, s in enumerate(source_order): if s in source_count: sources.append(source_count[s]) # sources[i] =", "if len(tree_cols) == 0: return tree col = tree_cols[0] type_count = GOLD[col].value_counts().to_dict() for", "3)) # else: # raise ValueError('same project ids but contamination and/or completeness do", "img_link if relatedids: for key in relatedids: if relatedids[key]: curr[key] = relatedids[key] else:", "dist_compl = {} for s in stats: if s['project'] not in dist_compl: dist_compl[s['project']]", "max_contamination: stats = [s for s in stats if s['contamination'] <= max_contamination] stats", "create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\" tree = [] if len(tree_cols) == 0:", "upa in upas] }) upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info in objs['infos']}", "= 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project /", "mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1, compl_1,", "['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name'] if len(upas) == 1:", "print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project / Study Name\"] ==", "def get_location_markers(ids, source=None): ''' For now this simply returns 1 marker with the", "for s in stats])) return stats, upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance,", "name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project / Study Name\"]", "new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key, val in gold_info.iteritems(): new_gold[key].append(val) new_gold =", "upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance: stats =", "we include the list 
of img_id's for each tree.append({ 'truncated_name': str(trunc_name), 'name' :", "pd import os from collections import defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def", "curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa'] = upa print(\"curr gold", "= {} for j, i in enumerate(upa_order): val = sources[i] if val !=", "key in relatedids: if relatedids[key]: curr[key] = relatedids[key] else: curr[key] = 'Unknown' if", "\"Input source:\"+source return markers def unwind_tree(X, tree): \"\"\" \"\"\" if tree.get('children'): for t", "# print(\"mapping the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) ==", "s in enumerate(source_order): if s in source_count: sources.append(source_count[s]) # sources[i] = source_count[s] else:", "id in curr_GOLD, # right now we only have a row for each", "if val != 0 and val != []: new_sources[j] = val return new_sources", "3), round(s['completeness'], 2), round(s['contamination'], 2)) for s in stats} return stats, dist_compl def", "s in stats: if s['project'] not in dist_compl: dist_compl[s['project']] = {} dist_compl[s['project']][s['mag_id']] =", "stats, upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance: stats", "i in upa_order: new_upa_names.append(upa_names[i]) upa_names = new_upa_names # TEMPORARY MARKER SET UP markers", "dict # of IMG_id -> distance, # and we include the list of", "{} dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)] # dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2),", "os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names = [] upas = []", "type_count: # if len(t) > name_max_len: # name = t[:name_max_len] + '...' #", "= img_link if relatedids: for key in relatedids: if relatedids[key]: curr[key] = relatedids[key]", "objs['infos']} if len(upa_to_name)==len(upas): return upa_to_name missing_upas = list(set(upas) - set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url)", "else: children = [] for key, val in dist.items(): child = {} child['truncated_name']", "{'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\":", "Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols}) print(\"curr", "kb_id # else: # curr['kb_id'] = '' id_stats = curr_stats[curr_stats.binid == id_] curr['completeness']", "= curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1, compl_1, cont_1 = dist_compl[key] #", "the templates. 
\"\"\" if len(query_results) > 1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else:", "source_order=upas) sources = [0 for _ in range(len(upa_names))] for i in range(len(upa_names)): for", "{mag:val[2] for mag, val in mag_dict.items()} else: dist, compl, cont = \"\", \"\",", "= \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_) curr['IMG_link'] = img_link if relatedids: for key in relatedids: if", "= [] currdir = os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats = pd.read_csv(stats_path)", "stats = [] for upa in query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD =", "!= 0 and val != []: new_sources[j] = val return new_sources def rewind_tree(tree,", "upa_names.append(upa_name) all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project", "= g['upa'].tolist() ss = {} for upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa]", "trunc_name = GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID '] # is", "\"details\":\"This is the Golden Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is", "= dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1, cont_1] # else: # raise ValueError('Same", "GOLD[GOLD[col]==t]['upa'].value_counts().to_dict() for i, s in enumerate(source_order): if s in source_count: sources.append(source_count[s]) # sources[i]", "print(\"je suis here first:\",X) X = np.transpose(np.array(X)) print(\"je suis here:\",X) print('-'*80) z =", "ids marker format: { 'name': name of marker 'lat': latitude as a float", "currdir = os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names = [] upas", "max_contamination): if max_distance: stats = [s for s in stats if s['dist'] <=", "relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD Analysis Project ID']", "!= None: curr['input_name'] = upa_name curr['dist'] = dist # if kb_id: # curr['kb_id']", "# raise ValueError('same project ids but contamination and/or completeness do not match',\\ #", "== []: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss = {} for upa", "in query_results[upa].items()])] tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name']", "San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523, \"lng\": -121.914325, \"details\":\"This is <NAME>.\"} ]", "if t.get('children'): t = rewind_tree(t, upa_order) tree['children'][t_ix] = t return tree def get_source_order(tree,", "s in enumerate(source_order): if s in ss: sources.append(ss[upa]) # sources[i] = ss[upa] else:", "from installed_clients.DataFileUtilClient import DataFileUtil import numpy as np import pandas as pd import", "[] for upa in query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD Analysis", "nodes to have dists as a dict # of IMG_id -> distance, #", "dist_compl[key] = curr_dist_compl[key] upa_names.append(upa_name) all_GOLD = pd.concat(all_GOLD, ignore_index=True) tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\", "Study Name\"] == t].iloc[0]['IMG Genome ID '] # is terminal node/actually a leaf", 
"filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination): \"\"\" Here we do a combiantion", "sources.append(source_count[s]) # sources[i] = source_count[s] else: sources.append(0) tree[-1]['sources'] = sources return tree def", "import Workspace from installed_clients.DataFileUtilClient import DataFileUtil import numpy as np import pandas as", "GOLD[col].value_counts().to_dict() for t in type_count: # if len(t) > name_max_len: # name =", "new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key in dist_compl: for", "upa_to_name[upa] = info[1] return upa_to_name def get_statistics(ids, GOLD, upa_name=None): ''' get statistics from", "mag, val in mag_dict.items()} compl = {mag:val[1] for mag, val in mag_dict.items()} cont", "= pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for id_ in ids: curr", "rewind_tree(tree, upa_order): \"\"\" \"\"\" for t_ix, t in enumerate(tree['children']): new_sources = remap_sources(t['sources'], upa_order)", "raise ValueError('same project ids but contamination and/or completeness do not match',\\ # round(s['completeness'],2),", "print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols}) print(\"curr gold", "round(s['contamination'],2), dist_compl[s['project']][2]) # dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for s", "-122.478206, \"details\":\"This is the Golden Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This", "len(stats) > n_max_results: stats = stats[:n_max_results] dist_compl = {} for s in stats:", "t count = \"({})\".format(type_count[t]) leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order) if leaf ==", "change the terminal nodes to have dists as a dict # of IMG_id", "simply returns 1 marker with the location of LBL. 
Returns list of markers", "= linkage(X, 'ward') upa_order = leaves_list(z) return upa_order def filter_results(ws_url, cb_url, query_results, n_max_results,", "upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis", "upa_order) tree = rewind_tree(tree, upa_order) new_upa_names = [] for i in upa_order: new_upa_names.append(upa_names[i])", "them by img_ids curr_GOLD.set_index('IMG Genome ID ', inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold", "id_stats.iloc[0]['contamination'] curr['MIMAG'] = id_stats.iloc[0]['MIMAG'] curr['mag_id'] = id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0],", "sources = [0 for _ in range(len(upa_names))] for i in range(len(upa_names)): for t", "list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"} currdir = os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD =", "min_completeness, max_contamination): if max_distance: stats = [s for s in stats if s['dist']", "[]: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss = {} for upa in", "def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({ 'objects':", "s['dist'] <= max_distance] if min_completeness: stats = [s for s in stats if", "str(info[0]), str(info[4])]):info[1] for info in objs['infos']} if len(upa_to_name)==len(upas): return upa_to_name missing_upas = list(set(upas)", "a float 'details': pop up details } ''' markers = [ {'name':\"LBL\", \"lat\":37.877344,", "ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa'] = upa print(\"curr gold cols 3:\",curr_GOLD.columns) #", "curr_stats = curr_stats.fillna('Unknown') for id_ in ids: curr = {} dist, kb_id, relatedids", "round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: # raise ValueError('same", "'Stats-taxonomy.csv') Stats = pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for id_ in", "csv, filtering the outputs according to the provided inputs, and staging some of", "'].append(img_id) for key, val in gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key", "\"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'], upa_order) tree", "objs]) for obj in objs: info = obj['info'] upa = '/'.join([str(info[6]), str(info[0]), str(info[4])])", "ids: GOLD: ''' output = [] currdir = os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data',", "marker format: { 'name': name of marker 'lat': latitude as a float 'lng':", "list(set(list(dist_2.keys())) - set(list(dist_1.keys()))) # for uinc_key in unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] #", "curr['input_name'] = upa_name curr['dist'] = dist # if kb_id: # curr['kb_id'] = kb_id", "# We want to get a row for each mag id in curr_GOLD,", "match') # # id_to_inputs[key].append(upa_name) # else: # dist_compl[key] = curr_dist_compl[key] upa_names.append(upa_name) all_GOLD =", "in enumerate(source_order): 
if s in source_count: sources.append(source_count[s]) # sources[i] = source_count[s] else: sources.append(0)", "compl_1, cont_1 = dist_compl[key] # dist_2, compl_2, cont_2 = curr_dist_compl[key] # if compl_1", "def unwind_tree(X, tree): \"\"\" \"\"\" if tree.get('children'): for t in tree['children']: if 'compl'", "upa_to_name = {list(query_results.keys())[0]:\"\"} currdir = os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names", "the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]: #", "sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree}", "# for uinc_key in unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] = [dist_1,", "= pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key in dist_compl: for mag_key", "stats if s['dist'] <= max_distance] if min_completeness: stats = [s for s in", "== dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: # raise ValueError('same project", "= id_ curr['IMG_Genome_ID'] = id_.split('_')[0] img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_) curr['IMG_link'] = img_link if", "\"\", \"\", \"\" print(\"-\"*90) print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name", "= stats[:n_max_results] dist_compl = {} for s in stats: if s['project'] not in", "defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl, source_order=None): \"\"\" \"\"\"", "if len(query_results) > 1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"}", "tree = [] if len(tree_cols) == 0: return tree col = tree_cols[0] type_count", "stats: \"\"\" X = unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here first:\",X) X =", "inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: []) for i, cs in", "as a float 'lng': longitude as a float 'details': pop up details }", "for m in markers: m['source'] = \"Input source:\"+source return markers def unwind_tree(X, tree):", "t].iloc[0]['IMG Genome ID '] # is terminal node/actually a leaf # here we", "if len(upas) == 1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count = sum([ int(t['count'][1:-1])", "defaultdict(lambda: []) for i, cs in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id']", "}) if source_order!=None: sources = [] if leaf == []: g = GOLD[GOLD[col]==t][['upa','mag_id']]", "but contamination and/or completeness do not match',\\ # round(s['completeness'],2), dist_compl[s['project']][1], # round(s['contamination'],2), dist_compl[s['project']][2])", "= '/'.join([str(info[6]), str(info[0]), str(info[4])]) upa_to_name[upa] = info[1] return upa_to_name def get_statistics(ids, GOLD, upa_name=None):", "''' For now this simply returns 1 marker with the location of LBL.", "import DataFileUtil import numpy as np import pandas as pd import os from", "curr = {} dist, kb_id, relatedids = ids[id_] if upa_name != None: curr['input_name']", 
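# A sketch of the nested structure create_tree returns (hypothetical values,
# source_order=None): each tree_cols level nests one 'children' list, and the
# final "Project / Study Name" level carries per-MAG distances from dist_compl:
#
#   [{'truncated_name': 'Environmental', 'count': '(3)', 'children': [
#       ...
#       {'truncated_name': '3300012345', 'name': 'Some Project Name',
#        'count': '(2)', 'children': [
#            {'truncated_name': '3300012345_7', 'count': '', 'dist': 0.012},
#            {'truncated_name': '3300012345_9', 'count': '', 'dist': 0.031}]}
#   ]}]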
"new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if key in dist_compl:", "X = unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here first:\",X) X = np.transpose(np.array(X)) print(\"je", "do not match') # # id_to_inputs[key].append(upa_name) # else: # dist_compl[key] = curr_dist_compl[key] upa_names.append(upa_name)", "child = {} child['truncated_name'] = key child['count'] = '' child['dist'] = val children.append(child)", "= curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key, val in gold_info.iteritems(): new_gold[key].append(val)", "upa_order) t['sources'] = new_sources if t.get('children'): t = rewind_tree(t, upa_order) tree['children'][t_ix] = t", "now this simply returns 1 marker with the location of LBL. Returns list", "tree col = tree_cols[0] type_count = GOLD[col].value_counts().to_dict() for t in type_count: # if", "dist_compl[t] dist = {mag:val[0] for mag, val in mag_dict.items()} compl = {mag:val[1] for", "and/or completeness do not match') # # id_to_inputs[key].append(upa_name) # else: # dist_compl[key] =", "filtering the outputs according to the provided inputs, and staging some of the", "Berkeley National Laboratory.\"}, {'name':\"Golden Gate Bridge\", \"lat\": 37.817060, \"lng\": -122.478206, \"details\":\"This is the", "the location of LBL. Returns list of markers ids: list of ids marker", "is <NAME>.\"} ] if source!= None: for m in markers: m['source'] = \"Input", "tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X = unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je", "children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf }) if source_order!=None: sources = [] if", "as a float 'details': pop up details } ''' markers = [ {'name':\"LBL\",", "unincluded_keys: # dist_1[uinc_key] = dist_2[uinc_key] # dist_compl[key] = [dist_1, compl_1, cont_1] # else:", "cols 3:\",curr_GOLD.columns) # We want to get a row for each mag id", "mag_dict = dist_compl[t] dist = {mag:val[0] for mag, val in mag_dict.items()} compl =", "obj in objs]) for obj in objs: info = obj['info'] upa = '/'.join([str(info[6]),", "Name'] if len(upas) == 1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count = sum([", "upa = '/'.join([str(info[6]), str(info[0]), str(info[4])]) upa_to_name[upa] = info[1] return upa_to_name def get_statistics(ids, GOLD,", "for mag, val in mag_dict.items()} cont = {mag:val[2] for mag, val in mag_dict.items()}", "str(trunc_name), 'name' : t, 'count': \"({})\".format(len(dist)) }) if source_order!=None: tree[-1]['dist'] = dist tree[-1]['compl']", "[0 for _ in range(len(upa_names))] for i in range(len(upa_names)): for t in tree:", "else: source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict() for i, s in enumerate(source_order): if s in source_count:", "in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG", "child['truncated_name'] = key child['count'] = '' child['dist'] = val children.append(child) tree[-1]['children'] = children", "= [] if leaf == []: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss", "val = sources[i] if val != 0 and val != []: new_sources[j] =", "= '' child['dist'] = val children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t, 'count':count,", "= sources return tree def 
get_location_markers(ids, source=None): ''' For now this simply returns", "curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key, val in gold_info.iteritems(): new_gold[key].append(val) new_gold", "col = tree_cols[0] type_count = GOLD[col].value_counts().to_dict() for t in type_count: # if len(t)", "= {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'],", "else: dist, compl, cont = \"\", \"\", \"\" print(\"-\"*90) print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project", "z = linkage(X, 'ward') upa_order = leaves_list(z) return upa_order def filter_results(ws_url, cb_url, query_results,", "2), round(s['contamination'], 2)) for s in stats} return stats, dist_compl def get_upa_names(ws_url, cb_url,", "in enumerate(source_order): if s in ss: sources.append(ss[upa]) # sources[i] = ss[upa] else: sources.append([])", "if kb_id: # curr['kb_id'] = kb_id # else: # curr['kb_id'] = '' id_stats", "gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols}) print(\"curr gold cols", "curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination)", "upa_names = [] upas = [] dist_compl = {} all_GOLD = [] #", "\"count_num\":count, \"children\":tree} else: tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas) sources = [0 for", "tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else: tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas)", "DataFileUtil import numpy as np import pandas as pd import os from collections", "val in gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold) all_GOLD.append(new_gold) for key in curr_dist_compl: if", "range(len(upa_names)): for t in tree: sources[i]+=t['sources'][i] total_num = sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)),", "#len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree} else: tree = create_tree(all_GOLD, tree_cols, dist_compl,", "statitics csvs ids: GOLD: ''' output = [] currdir = os.path.dirname(__file__) stats_path =", "val != 0 and val != []: new_sources[j] = val return new_sources def", "mag_dict.items()} compl = {mag:val[1] for mag, val in mag_dict.items()} cont = {mag:val[2] for", "of getting all the relevant statistics from the data csv, filtering the outputs", "= mag_ids for i, s in enumerate(source_order): if s in ss: sources.append(ss[upa]) #", "from installed_clients.WorkspaceClient import Workspace from installed_clients.DataFileUtilClient import DataFileUtil import numpy as np import", "stats = [s for s in stats if s['completeness'] >= min_completeness] if max_contamination:", "the outputs according to the provided inputs, and staging some of the outputs", "= kb_id # else: # curr['kb_id'] = '' id_stats = curr_stats[curr_stats.binid == id_]", "# TEMPORARY MARKER SET UP markers = get_location_markers(set([s['mag_id'] for s in stats])) return", "img_id's for each tree.append({ 'truncated_name': str(trunc_name), 'name' : t, 'count': \"({})\".format(len(dist)) }) if", "relevant 
statistics from the data csv, filtering the outputs according to the provided", "Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"}, {'name':\"<NAME>\", \"lat\": 37.881523,", "/ Study Name\": mag_dict = dist_compl[t] dist = {mag:val[0] for mag, val in", "np import pandas as pd import os from collections import defaultdict from scipy.cluster.hierarchy", "project ids but contamination and/or completeness do not match') # # id_to_inputs[key].append(upa_name) #", "return stats, upa_names, tree, markers def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination): if max_distance:", "source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict() for i, s in enumerate(source_order): if s in source_count: sources.append(source_count[s])", "t return tree def get_source_order(tree, upa_names): \"\"\" stats: \"\"\" X = unwind_tree([tree['sources']], tree)", "for each tree.append({ 'truncated_name': str(trunc_name), 'name' : t, 'count': \"({})\".format(len(dist)) }) if source_order!=None:", "'lng': longitude as a float 'details': pop up details } ''' markers =", "dist_compl def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({", "for mag_ids in t['sources']])) else: X.append(np.array(t['sources'])) X = unwind_tree(X, t) return X def", "round(s['completeness'],2), round(s['contamination'],2)] # print(\"mapping the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and", "i in enumerate(upa_order): val = sources[i] if val != 0 and val !=", "for mag_key in curr_dist_compl[key]: dist_compl[key][mag_key] = curr_dist_compl[key][mag_key] else: dist_compl[key] = curr_dist_compl[key] # dist_1,", "defaultdict(lambda:[]) stats = [] for upa in query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD", "len(objs) != len(missing_upas): raise ValueError(\"Could not find all input names. 
len upas: %s", "upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info in objs['infos']} if len(upa_to_name)==len(upas): return upa_to_name", "ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name'] else: curr['project'] = 'Unknown' output.append(curr) return output", "source_order=source_order) if leaf == []: if col == \"Project / Study Name\": mag_dict", "[]) for i, cs in enumerate(curr_stats): img_id = cs['IMG_Genome_ID'] mag_id = cs['mag_id'] gold_info", "else: tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas) sources = [0 for _ in", "] if source!= None: for m in markers: m['source'] = \"Input source:\"+source return", "i, s in enumerate(source_order): if s in ss: sources.append(ss[upa]) # sources[i] = ss[upa]", "upa_order: new_upa_names.append(upa_names[i]) upa_names = new_upa_names # TEMPORARY MARKER SET UP markers = get_location_markers(set([s['mag_id']", "str(info[4])]) upa_to_name[upa] = info[1] return upa_to_name def get_statistics(ids, GOLD, upa_name=None): ''' get statistics", "for key in relatedids: if relatedids[key]: curr[key] = relatedids[key] else: curr[key] = 'Unknown'", "ids[id_] if upa_name != None: curr['input_name'] = upa_name curr['dist'] = dist # if", "in mag_dict.items()} else: dist, compl, cont = \"\", \"\", \"\" print(\"-\"*90) print('project name:',t)", "cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url) objs = ws.get_object_info3({ 'objects': [{'ref':upa} for", "if s in ss: sources.append(ss[upa]) # sources[i] = ss[upa] else: sources.append([]) else: source_count", "group them by img_ids curr_GOLD.set_index('IMG Genome ID ', inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns)", "dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else: # raise ValueError('same project ids", "get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"} currdir = os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv')", "dist_1, compl_1, cont_1 = dist_compl[key] # dist_2, compl_2, cont_2 = curr_dist_compl[key] # if", "[s for s in stats if s['contamination'] <= max_contamination] stats = sorted(stats, key=lambda", "Type','Specific Ecosystem','Project / Study Name'] print(\"curr gold cols 1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for", "s in stats if s['completeness'] >= min_completeness] if max_contamination: stats = [s for", "= create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order) if leaf == []: if col == \"Project", "child['dist'] = val children.append(child) tree[-1]['children'] = children else: tree.append({ 'truncated_name':t, 'count':count, 'children':leaf })", "os.path.dirname(__file__) stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats = pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats", "round(s['contamination'],2)] # print(\"mapping the items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2)", "not match') # # id_to_inputs[key].append(upa_name) # else: # dist_compl[key] = curr_dist_compl[key] upa_names.append(upa_name) all_GOLD", "= rewind_tree(t, upa_order) tree['children'][t_ix] = t return tree def get_source_order(tree, upa_names): \"\"\" stats:", "os from collections import defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD, 
tree_cols,", "print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: []) for i, cs in enumerate(curr_stats):", "relatedids[key]: curr[key] = relatedids[key] else: curr[key] = 'Unknown' if relatedids['GOLD_Analysis_ID']: curr['project'] = GOLD[GOLD['GOLD", "= t[:name_max_len] + '...' # else: # name = t count = \"({})\".format(type_count[t])", "dist tree[-1]['compl'] = compl tree[-1]['cont'] = cont else: children = [] for key,", "tree def get_location_markers(ids, source=None): ''' For now this simply returns 1 marker with", "total_num = sum(sources) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree} upa_order = get_source_order(tree,", "query_results: upas.append(upa) upa_name = upa_to_name[upa] curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key,", "templates. \"\"\" if len(query_results) > 1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name", "stats = [s for s in stats if s['contamination'] <= max_contamination] stats =", "in markers: m['source'] = \"Input source:\"+source return markers def unwind_tree(X, tree): \"\"\" \"\"\"", "Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for id_ in ids: curr = {} dist, kb_id,", "in tree_cols}) print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name) curr_stats, curr_dist_compl", "\"\"\" Here we do a combiantion of getting all the relevant statistics from", "in objs['infos']} if len(upa_to_name)==len(upas): return upa_to_name missing_upas = list(set(upas) - set(list(upa_to_name.keys()))) dfu =", "tree) print(\"-\"*80) print(\"je suis here first:\",X) X = np.transpose(np.array(X)) print(\"je suis here:\",X) print('-'*80)", "col == \"Project / Study Name\": mag_dict = dist_compl[t] dist = {mag:val[0] for", "_ in range(len(upa_names))] for i in range(len(upa_names)): for t in tree: sources[i]+=t['sources'][i] total_num", "tree[-1]['cont'] = cont else: children = [] for key, val in dist.items(): child", "stats} return stats, dist_compl def get_upa_names(ws_url, cb_url, upas): \"\"\" \"\"\" ws = Workspace(ws_url)", "\"lng\": -121.914325, \"details\":\"This is <NAME>.\"} ] if source!= None: for m in markers:", "cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key, val in", "1:\",curr_GOLD.columns) curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols}) print(\"curr gold cols 2:\",curr_GOLD.columns) curr_stats", "dist_compl, source_order=source_order) if leaf == []: if col == \"Project / Study Name\":", "s in stats if s['contamination'] <= max_contamination] stats = sorted(stats, key=lambda s: s['dist'])", "relatedids: for key in relatedids: if relatedids[key]: curr[key] = relatedids[key] else: curr[key] =", "new_gold['IMG Genome ID '].append(img_id) for key, val in gold_info.iteritems(): new_gold[key].append(val) new_gold = pd.DataFrame.from_dict(new_gold)", "<reponame>kbaseapps/mags_mash from installed_clients.WorkspaceClient import Workspace from installed_clients.DataFileUtilClient import DataFileUtil import numpy as np", "\"\" print(\"-\"*90) print('project name:',t) print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0]) print(\"-\"*90) trunc_name = GOLD[GOLD[\"Project", "unwind_tree([tree['sources']], tree) print(\"-\"*80) print(\"je suis here first:\",X) 
X = np.transpose(np.array(X)) print(\"je suis here:\",X)", "- set(list(upa_to_name.keys()))) dfu = DataFileUtil(cb_url) objs = dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise", "= [s for s in stats if s['dist'] <= max_distance] if min_completeness: stats", "= ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\ 'Ecosystem Type','Specific Ecosystem','Project / Study Name'] print(\"curr gold cols", "# is terminal node/actually a leaf # here we change the terminal nodes", "\"lat\": 37.881523, \"lng\": -121.914325, \"details\":\"This is <NAME>.\"} ] if source!= None: for m", "the Golden Gate Bridge.\"}, {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco", "/ Study Name'] if len(upas) == 1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count", "mag_id = cs['mag_id'] gold_info = curr_GOLD.loc[int(img_id),:] new_gold['mag_id'].append(mag_id) new_gold['IMG Genome ID '].append(img_id) for key,", "tree_cols[1:], dist_compl, source_order=source_order) if leaf == []: if col == \"Project / Study", "max_contamination] stats = sorted(stats, key=lambda s: s['dist']) if len(stats) > n_max_results: stats =", "dist_compl, source_order=upas) sources = [0 for _ in range(len(upa_names))] for i in range(len(upa_names)):", "Ecosystem','Project / Study Name'] if len(upas) == 1: tree = create_tree(all_GOLD, tree_cols, dist_compl)", "== []: if col == \"Project / Study Name\": mag_dict = dist_compl[t] dist", "id_to_inputs = defaultdict(lambda:[]) stats = [] for upa in query_results: upas.append(upa) upa_name =", "curr_GOLD.set_index('IMG Genome ID ', inplace=True) print(\"curr gold cols 4:\",curr_GOLD.columns) new_gold = defaultdict(lambda: [])", "in objs]) for obj in objs: info = obj['info'] upa = '/'.join([str(info[6]), str(info[0]),", "if leaf == []: g = GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss = {}", "1: tree = create_tree(all_GOLD, tree_cols, dist_compl) count = sum([ int(t['count'][1:-1]) for t in", "> 1: upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"} currdir =", "min_completeness, max_contamination) curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])] curr_GOLD['upa']", "else: # raise ValueError('Same project ids but contamination and/or completeness do not match')", "= os.path.dirname(__file__) gold_path = os.path.join(currdir,'data','GOLD-metadata.csv') GOLD = pd.read_csv(gold_path) upa_names = [] upas =", "unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys()))) # for uinc_key in unincluded_keys: # dist_1[uinc_key] =", "ValueError('Same project ids but contamination and/or completeness do not match') # # id_to_inputs[key].append(upa_name)", "%s len objs: %s\"%(len(upas), len(objs)), upas, [obj['info'] for obj in objs]) for obj", "GOLD[GOLD[col]==t][['upa','mag_id']] upas = g['upa'].tolist() ss = {} for upa in upas: mag_ids =", "remap_sources(sources, upa_order): new_sources = {} for j, i in enumerate(upa_order): val = sources[i]", "kb_id, relatedids = ids[id_] if upa_name != None: curr['input_name'] = upa_name curr['dist'] =", "collections import defaultdict from scipy.cluster.hierarchy import linkage, leaves_list def create_tree(GOLD, tree_cols, dist_compl, source_order=None):", "upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys())) else: upa_to_name = {list(query_results.keys())[0]:\"\"} currdir = 
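# Worked example for remap_sources (hypothetical inputs): entries are re-keyed
# by their position in upa_order, and zero/empty entries are dropped:
#
#   remap_sources([3, 0, []], upa_order=[2, 0, 1])
#   #   j=0 -> sources[2] == [] -> dropped
#   #   j=1 -> sources[0] == 3  -> new_sources[1] = 3
#   #   j=2 -> sources[1] == 0  -> dropped
#   # returns {1: 3}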
def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance,
                   min_completeness, max_contamination):
    """
    Here we do a combination of getting all the relevant statistics from the
    data csv, filtering the outputs according to the provided inputs, and
    staging some of the outputs for the templates.
    """
    if len(query_results) > 1:
        upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys()))
    else:
        upa_to_name = {list(query_results.keys())[0]: ""}
    currdir = os.path.dirname(__file__)
    gold_path = os.path.join(currdir, 'data', 'GOLD-metadata.csv')
    GOLD = pd.read_csv(gold_path)
    upa_names = []
    upas = []
    dist_compl = {}
    all_GOLD = []
    stats = []
    tree_cols = ['Ecosystem', 'Ecosystem Category', 'Ecosystem Subtype',
                 'Ecosystem Type', 'Specific Ecosystem', 'Project / Study Name']
    for upa in query_results:
        upas.append(upa)
        upa_name = upa_to_name[upa]
        curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin(
            [val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])]
        curr_GOLD = curr_GOLD.fillna({col: "Unknown" for col in tree_cols})
        curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name)
        curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results,
                                                   max_distance, min_completeness,
                                                   max_contamination)
        curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin(
            [s['GOLD_Analysis_ID'] for s in curr_stats])]
        curr_GOLD['upa'] = upa
        stats += curr_stats
        # We want a row for each mag id in curr_GOLD; right now there is only
        # a row for each img id, so group them by img_ids.
        curr_GOLD.set_index('IMG Genome ID ', inplace=True)
        new_gold = defaultdict(list)
        for cs in curr_stats:
            img_id = cs['IMG_Genome_ID']
            new_gold['mag_id'].append(cs['mag_id'])
            new_gold['IMG Genome ID '].append(img_id)
            gold_info = curr_GOLD.loc[int(img_id), :]
            for key, val in gold_info.items():  # .iteritems() is deprecated
                new_gold[key].append(val)
        all_GOLD.append(pd.DataFrame.from_dict(new_gold))
        # Merge this input's per-project mapping into the global one.
        # NOTE: an earlier revision instead verified that completeness and
        # contamination agree across inputs before merging distance dicts,
        # raising ValueError('Same project ids but contamination and/or
        # completeness do not match') on mismatch.
        for key in curr_dist_compl:
            if key in dist_compl:
                for mag_key in curr_dist_compl[key]:
                    dist_compl[key][mag_key] = curr_dist_compl[key][mag_key]
            else:
                dist_compl[key] = curr_dist_compl[key]
        upa_names.append(upa_name)
    all_GOLD = pd.concat(all_GOLD, ignore_index=True)
    if len(upas) == 1:
        tree = create_tree(all_GOLD, tree_cols, dist_compl)
        count = sum([int(t['count'][1:-1]) for t in tree])  # len(query_results[upas[0]])
        tree = {"truncated_name": "", "count": "({})".format(str(count)),
                "count_num": count, "children": tree}
    else:
        tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas)
        sources = [0 for _ in range(len(upa_names))]
        for i in range(len(upa_names)):
            for t in tree:
                sources[i] += t['sources'][i]
        total_num = sum(sources)
        tree = {"truncated_name": "", "count": "({})".format(str(total_num)),
                'count_num': total_num, 'sources': sources, "children": tree}
        upa_order = get_source_order(tree, upa_names)
        tree['sources'] = remap_sources(tree['sources'], upa_order)
        tree = rewind_tree(tree, upa_order)
        upa_names = [upa_names[i] for i in upa_order]
    # TEMPORARY MARKER SET UP
    markers = get_location_markers(set([s['mag_id'] for s in stats]))
    return stats, upa_names, tree, markers


def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination):
    """
    Filter the raw statistics by distance, completeness and contamination,
    keep the n_max_results closest hits, and build a per-project mapping of
    mag_id -> [dist, completeness, contamination].
    """
    if max_distance:
        stats = [s for s in stats if s['dist'] <= max_distance]
    if min_completeness:
        stats = [s for s in stats if s['completeness'] >= min_completeness]
    if max_contamination:
        stats = [s for s in stats if s['contamination'] <= max_contamination]
    stats = sorted(stats, key=lambda s: s['dist'])
    if len(stats) > n_max_results:
        stats = stats[:n_max_results]
    dist_compl = {}
    for s in stats:
        if s['project'] not in dist_compl:
            dist_compl[s['project']] = {}
        dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3),
                                                 round(s['completeness'], 2),
                                                 round(s['contamination'], 2)]
    # NOTE: an earlier revision stored a single (dist, completeness,
    # contamination) tuple per project and raised ValueError('same project
    # ids but contamination and/or completeness do not match') when values
    # disagreed within a project; the per-mag mapping above supersedes that.
    return stats, dist_compl
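# Shape of the dist_compl mapping returned by filter_stats (hypothetical
# values): project name -> mag_id -> [dist, completeness, contamination],
# rounded to 3/2/2 decimal places respectively:
#
#   {'Some Project Name': {'3300012345_7': [0.012, 98.51, 1.2]}}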
len upas: %s len objs:", "'compl' in t: X.append(np.array([len(mag_ids) for mag_ids in t['sources']])) else: X.append(np.array(t['sources'])) X = unwind_tree(X,", "upa_order = get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'], upa_order) tree = rewind_tree(tree, upa_order) new_upa_names", "= sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]]) tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count,", "for key, val in dist.items(): child = {} child['truncated_name'] = key child['count'] =", "enumerate(upa_order): val = sources[i] if val != 0 and val != []: new_sources[j]", "= {} for upa in upas: mag_ids = g[g['upa']==upa]['mag_id'].tolist() ss[upa] = mag_ids for", "dfu.get_objects({'object_refs':missing_upas})['data'] if len(objs) != len(missing_upas): raise ValueError(\"Could not find all input names. len", "stats if s['completeness'] >= min_completeness] if max_contamination: stats = [s for s in", "= ws.get_object_info3({ 'objects': [{'ref':upa} for upa in upas] }) upa_to_name = {'/'.join([str(info[6]), str(info[0]),", "else: # name = t count = \"({})\".format(type_count[t]) leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl,", "os.path.join(currdir, 'data', 'Stats-taxonomy.csv') Stats = pd.read_csv(stats_path) curr_stats = Stats[Stats['binid'].isin(ids.keys())] curr_stats = curr_stats.fillna('Unknown') for", "stats = sorted(stats, key=lambda s: s['dist']) if len(stats) > n_max_results: stats = stats[:n_max_results]", "== dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3)) # else:", "to get a row for each mag id in curr_GOLD, # right now", "items:\",s, dist_compl[s['project']]) # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]: # dist_compl[s['project']][0][s['mag_id']]", "= get_source_order(tree, upa_names) tree['sources'] = remap_sources(tree['sources'], upa_order) tree = rewind_tree(tree, upa_order) new_upa_names =", "s['dist']) if len(stats) > n_max_results: stats = stats[:n_max_results] dist_compl = {} for s", "of the outputs for the templates. 
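
# A minimal usage sketch (illustration only, not part of the original module):
# it builds a two-row toy table to show the nested dict/list shape that
# create_tree returns. All column values, project and MAG ids below are
# hypothetical.
def _demo_create_tree():
    demo = pd.DataFrame({
        'Ecosystem': ['Environmental', 'Environmental'],
        'Project / Study Name': ['proj_a', 'proj_a'],
        'IMG Genome ID ': ['111', '111'],
    })
    # dist_compl maps project name -> {mag_id: [distance, completeness, contamination]}
    dist_compl = {'proj_a': {'111_1': [0.1, 95.0, 1.2], '111_2': [0.3, 88.0, 2.0]}}
    tree = create_tree(demo, ['Ecosystem', 'Project / Study Name'], dist_compl)
    # Expected shape: one 'Environmental' node with count '(2)', whose terminal
    # 'proj_a' child carries one leaf per MAG id with its distance value.
    return tree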

def get_location_markers(ids, source=None):
    '''For now this simply returns a fixed set of placeholder markers around LBL.
    Returns list of markers
    ids: list of ids
    marker format:
        {
            'name': name of marker
            'lat': latitude as a float
            'lng': longitude as a float
            'details': pop up details
        }
    '''
    markers = [
        {'name': "LBL", "lat": 37.877344, "lng": -122.250694,
         "details": "This is Lawrence Berkeley National Laboratory."},
        {'name': "Golden Gate Bridge", "lat": 37.817060, "lng": -122.478206,
         "details": "This is the Golden Gate Bridge."},
        {'name': "SFO Airport", 'lat': 37.616310, 'lng': -122.386793,
         'details': "This is San Francisco International Airport."},
        {'name': "<NAME>", "lat": 37.881523, "lng": -121.914325,
         "details": "This is <NAME>."}
    ]
    if source is not None:
        for m in markers:
            m['source'] = "Input source:" + source
    return markers


def unwind_tree(X, tree):
    """Flatten the per-node 'sources' vectors of the tree into the list X."""
    if tree.get('children'):
        for t in tree['children']:
            if 'compl' in t:
                # terminal nodes store lists of mag ids per source; use their counts
                X.append(np.array([len(mag_ids) for mag_ids in t['sources']]))
            else:
                X.append(np.array(t['sources']))
            X = unwind_tree(X, t)
    return X


def remap_sources(sources, upa_order):
    """Reorder a per-source vector according to upa_order, keeping only non-empty entries."""
    new_sources = {}
    for j, i in enumerate(upa_order):
        val = sources[i]
        if val != 0 and val != []:
            new_sources[j] = val
    return new_sources


def rewind_tree(tree, upa_order):
    """Apply remap_sources to every node of the tree, depth first."""
    for t_ix, t in enumerate(tree['children']):
        new_sources = remap_sources(t['sources'], upa_order)
        t['sources'] = new_sources
        if t.get('children'):
            t = rewind_tree(t, upa_order)
        tree['children'][t_ix] = t
    return tree

def get_source_order(tree, upa_names):
    """Cluster the input sources by their per-node counts and return the dendrogram leaf order."""
    X = unwind_tree([tree['sources']], tree)
    print("-" * 80)
    print("je suis here first:", X)
    X = np.transpose(np.array(X))
    print("je suis here:", X)
    print('-' * 80)
    z = linkage(X, 'ward')
    upa_order = leaves_list(z)
    return upa_order
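
# A small sketch (illustrative, not from the original module) of the reordering
# idea used above: Ward linkage over per-source count vectors, with leaves_list
# giving a display order that places similar sources next to each other.
def _demo_source_order():
    # rows = tree nodes, columns = input sources (hypothetical counts)
    counts = np.array([[5, 0, 4],
                       [1, 7, 2],
                       [6, 0, 5]])
    z = linkage(np.transpose(counts), 'ward')
    # the two similar sources (columns 0 and 2) come out adjacent, e.g. [1, 0, 2]
    return leaves_list(z)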

def filter_results(ws_url, cb_url, query_results, n_max_results, max_distance,
                   min_completeness, max_contamination):
    """Here we do a combination of getting all the relevant statistics from the
    data csv, filtering the outputs according to the provided inputs, and
    staging some of the outputs for the templates.
    """
    if len(query_results) > 1:
        upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys()))
    else:
        upa_to_name = {list(query_results.keys())[0]: ""}
    currdir = os.path.dirname(__file__)
    gold_path = os.path.join(currdir, 'data', 'GOLD-metadata.csv')
    GOLD = pd.read_csv(gold_path)
    upa_names = []
    upas = []
    dist_compl = {}
    all_GOLD = []
    # id_to_inputs = defaultdict(lambda: [])
    stats = []
    for upa in query_results:
        upas.append(upa)
        upa_name = upa_to_name[upa]
        curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin(
            [val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])]
        tree_cols = ['Ecosystem', 'Ecosystem Category', 'Ecosystem Subtype',
                     'Ecosystem Type', 'Specific Ecosystem', 'Project / Study Name']
        print("curr gold cols 1:", curr_GOLD.columns)
        curr_GOLD = curr_GOLD.fillna({col: "Unknown" for col in tree_cols})
        print("curr gold cols 2:", curr_GOLD.columns)
        curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name)
        curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance,
                                                   min_completeness, max_contamination)
        curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin(
            [s['GOLD_Analysis_ID'] for s in curr_stats])]
        curr_GOLD['upa'] = upa
        print("curr gold cols 3:", curr_GOLD.columns)
        # We want to get a row for each mag id in curr_GOLD,
        # right now we only have a row for each img id
        stats += curr_stats
        # group them by img_ids
        curr_GOLD.set_index('IMG Genome ID ', inplace=True)
        print("curr gold cols 4:", curr_GOLD.columns)
        new_gold = defaultdict(lambda: [])
        for i, cs in enumerate(curr_stats):
            img_id = cs['IMG_Genome_ID']
            mag_id = cs['mag_id']
            gold_info = curr_GOLD.loc[int(img_id), :]
            new_gold['mag_id'].append(mag_id)
            new_gold['IMG Genome ID '].append(img_id)
            for key, val in gold_info.items():
                new_gold[key].append(val)
        new_gold = pd.DataFrame.from_dict(new_gold)
        all_GOLD.append(new_gold)
        for key in curr_dist_compl:
            if key in dist_compl:
                for mag_key in curr_dist_compl[key]:
                    dist_compl[key][mag_key] = curr_dist_compl[key][mag_key]
            else:
                dist_compl[key] = curr_dist_compl[key]
            # dist_1, compl_1, cont_1 = dist_compl[key]
            # dist_2, compl_2, cont_2 = curr_dist_compl[key]
            # if compl_1 == compl_2 and cont_1 == cont_2:
            #     # check to see distance dictionary
            #     unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys())))
            #     for uinc_key in unincluded_keys:
            #         dist_1[uinc_key] = dist_2[uinc_key]
            #     dist_compl[key] = [dist_1, compl_1, cont_1]
            # else:
            #     raise ValueError('Same project ids but contamination and/or '
            #                      'completeness do not match')
            # # id_to_inputs[key].append(upa_name)
            # else:
            #     dist_compl[key] = curr_dist_compl[key]
        upa_names.append(upa_name)
    all_GOLD = pd.concat(all_GOLD, ignore_index=True)
    tree_cols = ['Ecosystem', 'Ecosystem Category', 'Ecosystem Subtype',
                 'Ecosystem Type', 'Specific Ecosystem', 'Project / Study Name']
    if len(upas) == 1:
        tree = create_tree(all_GOLD, tree_cols, dist_compl)
        count = sum([int(t['count'][1:-1]) for t in tree])  # len(query_results[upas[0]])
        tree = {"truncated_name": "", "count": "({})".format(str(count)),
                "count_num": count, "children": tree}
    else:
        tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas)
        sources = [0 for _ in range(len(upa_names))]
        for i in range(len(upa_names)):
            for t in tree:
                sources[i] += t['sources'][i]
        total_num = sum(sources)
        tree = {"truncated_name": "", "count": "({})".format(str(total_num)),
                'count_num': total_num, 'sources': sources, "children": tree}
        upa_order = get_source_order(tree, upa_names)
        tree['sources'] = remap_sources(tree['sources'], upa_order)
        tree = rewind_tree(tree, upa_order)
        new_upa_names = []
        for i in upa_order:
            new_upa_names.append(upa_names[i])
        upa_names = new_upa_names
    # TEMPORARY MARKER SET UP
    markers = get_location_markers(set([s['mag_id'] for s in stats]))
    return stats, upa_names, tree, markers
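
# Usage sketch (illustration only: the URLs and the query_results payload are
# hypothetical, and running it assumes the bundled data csvs are present).
# query_results maps each input object reference (UPA) to a dict of
# mag_id -> (distance, kb_id, related_ids), matching what get_statistics
# unpacks below.
def _demo_filter_results():
    query_results = {
        '123/4/5': {'111_1': (0.12, None, {'GOLD_Analysis_ID': 'Ga0000001'})},
    }
    return filter_results('https://kbase.example/ws', 'https://kbase.example/cb',
                          query_results, n_max_results=10, max_distance=0.5,
                          min_completeness=50, max_contamination=10)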

def filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination):
    """Apply the user-provided thresholds, sort by distance, and keep the top results."""
    if max_distance:
        stats = [s for s in stats if s['dist'] <= max_distance]
    if min_completeness:
        stats = [s for s in stats if s['completeness'] >= min_completeness]
    if max_contamination:
        stats = [s for s in stats if s['contamination'] <= max_contamination]
    stats = sorted(stats, key=lambda s: s['dist'])
    if len(stats) > n_max_results:
        stats = stats[:n_max_results]
    dist_compl = {}
    for s in stats:
        if s['project'] not in dist_compl:
            dist_compl[s['project']] = {}
            dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3),
                                                     round(s['completeness'], 2),
                                                     round(s['contamination'], 2)]
            # dist_compl[s['project']] = [{s['mag_id']: round(s['dist'], 3)},
            #                             round(s['completeness'], 2), round(s['contamination'], 2)]
        else:
            dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3),
                                                     round(s['completeness'], 2),
                                                     round(s['contamination'], 2)]
            # print("mapping the items:", s, dist_compl[s['project']])
            # if round(s['completeness'], 2) == dist_compl[s['project']][1] and \
            #         round(s['contamination'], 2) == dist_compl[s['project']][2]:
            #     dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3))
            # else:
            #     raise ValueError('same project ids but contamination and/or completeness do not match',
            #                      round(s['completeness'], 2), dist_compl[s['project']][1],
            #                      round(s['contamination'], 2), dist_compl[s['project']][2])
    # dist_compl = {s['project']: (round(s['dist'], 3), round(s['completeness'], 2),
    #                              round(s['contamination'], 2)) for s in stats}
    return stats, dist_compl
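
# Worked example (hypothetical values): with max_distance=0.5 the second record
# is dropped, and the survivors are sorted by ascending distance.
def _demo_filter_stats():
    stats = [
        {'dist': 0.40, 'completeness': 92.0, 'contamination': 1.0,
         'project': 'proj_a', 'mag_id': '111_1'},
        {'dist': 0.80, 'completeness': 99.0, 'contamination': 0.5,
         'project': 'proj_a', 'mag_id': '111_2'},
        {'dist': 0.10, 'completeness': 85.0, 'contamination': 3.0,
         'project': 'proj_b', 'mag_id': '222_1'},
    ]
    kept, dist_compl = filter_stats(stats, n_max_results=10, max_distance=0.5,
                                    min_completeness=50, max_contamination=10)
    # kept -> the '222_1' record (dist 0.10) followed by '111_1' (dist 0.40);
    # dist_compl -> {'proj_b': {'222_1': [0.1, 85.0, 3.0]},
    #                'proj_a': {'111_1': [0.4, 92.0, 1.0]}}
    return kept, dist_compl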

def get_upa_names(ws_url, cb_url, upas):
    """Resolve each workspace object reference (UPA) to its object name."""
    ws = Workspace(ws_url)
    objs = ws.get_object_info3({'objects': [{'ref': upa} for upa in upas]})
    upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]): info[1]
                   for info in objs['infos']}
    if len(upa_to_name) == len(upas):
        return upa_to_name
    missing_upas = list(set(upas) - set(list(upa_to_name.keys())))
    dfu = DataFileUtil(cb_url)
    objs = dfu.get_objects({'object_refs': missing_upas})['data']
    if len(objs) != len(missing_upas):
        raise ValueError("Could not find all input names. len upas: %s len objs: %s"
                         % (len(upas), len(objs)), upas, [obj['info'] for obj in objs])
    for obj in objs:
        info = obj['info']
        upa = '/'.join([str(info[6]), str(info[0]), str(info[4])])
        upa_to_name[upa] = info[1]
    return upa_to_name
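
# The '/'.join above rebuilds the canonical UPA from a workspace info tuple,
# where (by KBase convention) info[6] is the workspace id, info[0] the object
# id, info[4] the version, and info[1] the object name. A toy illustration
# with a made-up tuple:
def _demo_upa_from_info():
    info = [7, 'my_assembly', 'KBaseGenomes.Genome-1.0', '2021-01-01', 3,
            'someuser', 123, 'my_workspace', 'md5', 1000, {}]
    return '/'.join([str(info[6]), str(info[0]), str(info[4])])  # -> '123/7/3'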

def get_statistics(ids, GOLD, upa_name=None):
    '''get statistics from the GOLD and statistics csvs
    ids: map of mag_id -> (distance, kb_id, related_ids)
    GOLD: GOLD metadata table restricted to the relevant analysis projects
    '''
    output = []
    currdir = os.path.dirname(__file__)
    stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv')
    Stats = pd.read_csv(stats_path)
    curr_stats = Stats[Stats['binid'].isin(ids.keys())]
    curr_stats = curr_stats.fillna('Unknown')
    for id_ in ids:
        curr = {}
        dist, kb_id, relatedids = ids[id_]
        if upa_name is not None:
            curr['input_name'] = upa_name
        curr['dist'] = dist
        # if kb_id:
        #     curr['kb_id'] = kb_id
        # else:
        #     curr['kb_id'] = ''
        id_stats = curr_stats[curr_stats.binid == id_]
        curr['completeness'] = id_stats.iloc[0]['completeness']
        curr['contamination'] = id_stats.iloc[0]['contamination']
        curr['MIMAG'] = id_stats.iloc[0]['MIMAG']
        curr['mag_id'] = id_
        curr['IMG_Genome_ID'] = id_.split('_')[0]
        img_link = ("https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail"
                    "&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s"
                    % (id_.split('_')[0], id_))
        curr['IMG_link'] = img_link
        if relatedids:
            for key in relatedids:
                if relatedids[key]:
                    curr[key] = relatedids[key]
                else:
                    curr[key] = 'Unknown'
            if relatedids['GOLD_Analysis_ID']:
                curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] ==
                                       relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name']
            else:
                curr['project'] = 'Unknown'
        output.append(curr)
    return output
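
# The IMG bin link is built from the composite id '<taxon_oid>_<bin>':
# a hypothetical id '3300000001_42' yields taxon_oid '3300000001' and the
# URL below (same format string as in get_statistics).
def _demo_img_link(id_='3300000001_42'):
    return ("https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail"
            "&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s"
            % (id_.split('_')[0], id_))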
<filename>codewars/7kyu/dinamuh/SpeedControl/test.py
from main import gps


def test_gps(benchmark):
    assert benchmark(gps, 20, [0.0, 0.23, 0.46, 0.69, 0.92, 1.15, 1.38, 1.61]) == 41
    assert benchmark(gps, 12, [0.0, 0.11, 0.22, 0.33, 0.44, 0.65, 1.08, 1.26, 1.68,
                               1.89, 2.1, 2.31, 2.52, 3.25]) == 219
    assert benchmark(gps, 20, [0.0, 0.18, 0.36, 0.54, 0.72, 1.05, 1.26, 1.47, 1.92,
                               2.16, 2.4, 2.64, 2.88, 3.12, 3.36, 3.6, 3.84]) == 80
    assert benchmark(gps, 14, [0.0, 0.01, 0.36, 0.6, 0.84, 1.05, 1.26, 1.47, 1.68,
                               1.89, 2.1, 2.31, 2.52, 2.73, 2.94, 3.15]) == 90
    assert benchmark(gps, 17, [0.0, 0.02, 0.36, 0.54, 0.72, 0.9, 1.08, 1.26, 1.44,
                               1.62, 1.8]) == 72
    assert benchmark(gps, 12, [0.0, 0.24, 0.48, 0.72, 0.96, 1.2, 1.44, 1.68, 1.92,
                               2.16, 2.4]) == 72
    assert benchmark(gps, 17, [0.0, 0.02, 0.44, 0.66, 0.88, 1.1, 1.32, 1.54, 1.76]) == 88
    assert benchmark(gps, 16, [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.32, 1.54, 1.76, 1.98,
                               2.2, 2.42, 2.76, 2.99, 3.22, 3.45]) == 76
    assert benchmark(gps, 17, [0.0, 0.01, 0.36, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25,
                               2.5, 2.75, 3.0, 3.25, 3.5, 3.75, 4.0, 4.25, 4.5, 4.75]) == 82
    assert benchmark(gps, 19, [0.0, 0.2, 0.4, 0.69, 0.92, 1.15, 1.38, 1.61, 1.92,
                               2.16, 2.4, 2.64, 2.88, 3.12, 3.36]) == 58
    assert benchmark(gps, 19, []) == 0
    assert benchmark(gps, 19, [0.0]) == 0
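
# A reference implementation consistent with the fixtures above (one possible
# solution; the imported main.gps is the module actually under test). With s
# the sampling period in seconds and x the cumulative distances in km, the
# speed over one interval is delta_km * 3600 / s km/h, and the answer is the
# floored maximum over consecutive intervals, or 0 with fewer than two points.
def _reference_gps(s, x):
    if len(x) < 2:
        return 0
    return int(max(b - a for a, b in zip(x, x[1:])) * 3600 / s)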
[ "\"\"\"Graphical User Interface (GUI) utility module. This module contains various tools and utilities", "import logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press", "if \"annotator\" not in config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\")", "return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool based", "or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"]) return", "given the full config dictionary. Args: session_name: name of the annotation session used", ".. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or not config[\"annotator\"]:", "The fully-constructed annotator object, ready to begin annotation via its ``run()`` function. ..", "\"type\" not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\")", "annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"])", "= logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener based on pynput.keyboard (used", "various tools and utilities used to instantiate annotators and GUI elements. \"\"\" import", "logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener", "in the config dictionary. The tool type is expected to be in the", "not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type", "utilities used to instantiate annotators and GUI elements. \"\"\" import logging import thelper.utils", "be saved. config: full configuration dictionary that will be parsed for annotator parameters.", "if \"type\" not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type'", "under the `type` key. For more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`.", "on the type contained in the config dictionary. The tool type is expected", "thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener based on", "must be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will", "be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be", "This module contains various tools and utilities used to instantiate annotators and GUI", "session directory where annotations and other outputs will be saved. config: full configuration", "the `type` key. For more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The", "def create_key_listener(callback): \"\"\"Returns a key press listener based on pynput.keyboard (used for mocking).\"\"\"", "contained in the config dictionary. The tool type is expected to be in", "ready to begin annotation via its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\"", "annotation via its ``run()`` function. .. 
seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not", "contains various tools and utilities used to instantiate annotators and GUI elements. \"\"\"", "type is expected to be in the configuration dictionary's `annotator` field, under the", "name of the annotation session used for printing and to create output directories.", "will provide the data to annotate. Returns: The fully-constructed annotator object, ready to", "configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with the constructor", "saved. config: full configuration dictionary that will be parsed for annotator parameters. datasets:", "AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"]) return annotator_type(session_name, config, save_dir, datasets)", "listener based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name,", "refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with the constructor signature", "config dictionary. The tool type is expected to be in the configuration dictionary's", "that will provide the data to annotate. Returns: The fully-constructed annotator object, ready", "other outputs will be saved. config: full configuration dictionary that will be parsed", "in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type =", "config, datasets): \"\"\"Instantiates a GUI annotation tool based on the type contained in", "The tool type is expected to be in the configuration dictionary's `annotator` field,", "config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not in", "to be in the configuration dictionary's `annotator` field, under the `type` key. For", "dictionary. Args: session_name: name of the annotation session used for printing and to", "where annotations and other outputs will be saved. config: full configuration dictionary that", "dictionary's `annotator` field, under the `type` key. For more information on the configuration,", "For more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must", "pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets):", "the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be given the full", "not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"]) return annotator_type(session_name,", "The object's constructor will be given the full config dictionary. Args: session_name: name", "annotator parameters. datasets: map of named dataset parsers that will provide the data", "the session directory where annotations and other outputs will be saved. config: full", "to instantiate annotators and GUI elements. \"\"\" import logging import thelper.utils logger =", "the config dictionary. The tool type is expected to be in the configuration", "of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be given the full config dictionary. 
Args:", "for printing and to create output directories. save_dir: path to the session directory", "the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with the", "create output directories. save_dir: path to the session directory where annotations and other", "mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI", "\"\"\"Returns a key press listener based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard", "directories. save_dir: path to the session directory where annotations and other outputs will", "will be saved. config: full configuration dictionary that will be parsed for annotator", "dictionary that will be parsed for annotator parameters. datasets: map of named dataset", "dataset parsers that will provide the data to annotate. Returns: The fully-constructed annotator", "based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir,", "field\") annotator_config = config[\"annotator\"] if \"type\" not in annotator_config or not annotator_config[\"type\"]: raise", "to begin annotation via its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if", "import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation", "pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool", "begin annotation via its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\"", "instantiate annotators and GUI elements. \"\"\" import logging import thelper.utils logger = logging.getLogger(__name__)", ":class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or not config[\"annotator\"]: raise AssertionError(\"config missing", "configuration dictionary that will be parsed for annotator parameters. datasets: map of named", "be given the full config dictionary. Args: session_name: name of the annotation session", "the type contained in the config dictionary. The tool type is expected to", "object, ready to begin annotation via its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator`", "tools and utilities used to instantiate annotators and GUI elements. \"\"\" import logging", "its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config", "create_key_listener(callback): \"\"\"Returns a key press listener based on pynput.keyboard (used for mocking).\"\"\" import", "utility module. This module contains various tools and utilities used to instantiate annotators", "\"\"\" if \"annotator\" not in config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator'", "raise AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"]) return annotator_type(session_name, config, save_dir,", "| :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or not config[\"annotator\"]: raise AssertionError(\"config", "be in the configuration dictionary's `annotator` field, under the `type` key. 
For more", "expected to be in the configuration dictionary's `annotator` field, under the `type` key.", "parsers that will provide the data to annotate. Returns: The fully-constructed annotator object,", "The instantiated type must be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The", "(used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates", "session_name: name of the annotation session used for printing and to create output", ":class:`thelper.gui.annotators.Annotator`. The object's constructor will be given the full config dictionary. Args: session_name:", "type contained in the config dictionary. The tool type is expected to be", "annotators and GUI elements. \"\"\" import logging import thelper.utils logger = logging.getLogger(__name__) def", "fully-constructed annotator object, ready to begin annotation via its ``run()`` function. .. seealso::", "in the configuration dictionary's `annotator` field, under the `type` key. For more information", "a GUI annotation tool based on the type contained in the config dictionary.", "session used for printing and to create output directories. save_dir: path to the", "seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or not config[\"annotator\"]: raise", "via its ``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in", "User Interface (GUI) utility module. This module contains various tools and utilities used", "annotations and other outputs will be saved. config: full configuration dictionary that will", "the configuration dictionary's `annotator` field, under the `type` key. For more information on", "output directories. save_dir: path to the session directory where annotations and other outputs", "function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or not", "to the session directory where annotations and other outputs will be saved. config:", "or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\"", "logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener based on pynput.keyboard (used for", "annotation tool based on the type contained in the config dictionary. The tool", "config: full configuration dictionary that will be parsed for annotator parameters. datasets: map", "named dataset parsers that will provide the data to annotate. Returns: The fully-constructed", "Args: session_name: name of the annotation session used for printing and to create", "tool type is expected to be in the configuration dictionary's `annotator` field, under", "`type` key. For more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated", "the full config dictionary. Args: session_name: name of the annotation session used for", "module contains various tools and utilities used to instantiate annotators and GUI elements.", "pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool based on", "to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with the constructor signature of", "signature of :class:`thelper.gui.annotators.Annotator`. 
The object's constructor will be given the full config dictionary.", "`annotator` field, under the `type` key. For more information on the configuration, refer", "to annotate. Returns: The fully-constructed annotator object, ready to begin annotation via its", "save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool based on the type contained", "the annotation session used for printing and to create output directories. save_dir: path", "datasets): \"\"\"Instantiates a GUI annotation tool based on the type contained in the", "parsed for annotator parameters. datasets: map of named dataset parsers that will provide", "and utilities used to instantiate annotators and GUI elements. \"\"\" import logging import", "in config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"]", "annotator object, ready to begin annotation via its ``run()`` function. .. seealso:: |", "tool based on the type contained in the config dictionary. The tool type", "printing and to create output directories. save_dir: path to the session directory where", "datasets: map of named dataset parsers that will provide the data to annotate.", "= config[\"annotator\"] if \"type\" not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config", "compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be given", "import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener based", "press listener based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def", "AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not in annotator_config or", ":class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`.", "full configuration dictionary that will be parsed for annotator parameters. datasets: map of", "not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not", "Interface (GUI) utility module. This module contains various tools and utilities used to", "save_dir: path to the session directory where annotations and other outputs will be", "directory where annotations and other outputs will be saved. config: full configuration dictionary", "constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be given the full config", "provide the data to annotate. Returns: The fully-constructed annotator object, ready to begin", "based on the type contained in the config dictionary. The tool type is", "GUI annotation tool based on the type contained in the config dictionary. The", "on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible with", "of named dataset parsers that will provide the data to annotate. Returns: The", "and GUI elements. \"\"\" import logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback):", "configuration dictionary's `annotator` field, under the `type` key. 
For more information on the", "logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key press listener based on pynput.keyboard", "missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not in annotator_config or not", "elements. \"\"\" import logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a", "parameters. datasets: map of named dataset parsers that will provide the data to", "def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool based on the", "for annotator parameters. datasets: map of named dataset parsers that will provide the", "instantiated type must be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's", "dictionary. The tool type is expected to be in the configuration dictionary's `annotator`", "config[\"annotator\"] if \"type\" not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator config missing", "used to instantiate annotators and GUI elements. \"\"\" import logging import thelper.utils logger", "with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will be given the", "\"\"\"Instantiates a GUI annotation tool based on the type contained in the config", "used for printing and to create output directories. save_dir: path to the session", "not in config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config =", "config dictionary. Args: session_name: name of the annotation session used for printing and", "and to create output directories. save_dir: path to the session directory where annotations", "GUI elements. \"\"\" import logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns", "(GUI) utility module. This module contains various tools and utilities used to instantiate", "is expected to be in the configuration dictionary's `annotator` field, under the `type`", "``run()`` function. .. seealso:: | :class:`thelper.gui.annotators.Annotator` \"\"\" if \"annotator\" not in config or", "config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if", "that will be parsed for annotator parameters. datasets: map of named dataset parsers", "more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be", "a key press listener based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return", "constructor will be given the full config dictionary. Args: session_name: name of the", "annotation session used for printing and to create output directories. save_dir: path to", "data to annotate. Returns: The fully-constructed annotator object, ready to begin annotation via", "map of named dataset parsers that will provide the data to annotate. Returns:", "path to the session directory where annotations and other outputs will be saved.", "object's constructor will be given the full config dictionary. Args: session_name: name of", "Returns: The fully-constructed annotator object, ready to begin annotation via its ``run()`` function.", "key press listener based on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback)", "and other outputs will be saved. 
config: full configuration dictionary that will be", "will be parsed for annotator parameters. datasets: map of named dataset parsers that", "field, under the `type` key. For more information on the configuration, refer to", "full config dictionary. Args: session_name: name of the annotation session used for printing", "outputs will be saved. config: full configuration dictionary that will be parsed for", "\"annotator\" not in config or not config[\"annotator\"]: raise AssertionError(\"config missing 'annotator' field\") annotator_config", "to create output directories. save_dir: path to the session directory where annotations and", "annotate. Returns: The fully-constructed annotator object, ready to begin annotation via its ``run()``", "type must be compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor", "will be given the full config dictionary. Args: session_name: name of the annotation", "module. This module contains various tools and utilities used to instantiate annotators and", "\"\"\" import logging import thelper.utils logger = logging.getLogger(__name__) def create_key_listener(callback): \"\"\"Returns a key", "on pynput.keyboard (used for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config,", "annotator_config[\"type\"]: raise AssertionError(\"annotator config missing 'type' field\") annotator_type = thelper.utils.import_class(annotator_config[\"type\"]) return annotator_type(session_name, config,", "raise AssertionError(\"config missing 'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not in annotator_config", "information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be compatible", "for mocking).\"\"\" import pynput.keyboard return pynput.keyboard.Listener(on_press=callback) def create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a", "of the annotation session used for printing and to create output directories. save_dir:", "the data to annotate. Returns: The fully-constructed annotator object, ready to begin annotation", "create_annotator(session_name, save_dir, config, datasets): \"\"\"Instantiates a GUI annotation tool based on the type", "'annotator' field\") annotator_config = config[\"annotator\"] if \"type\" not in annotator_config or not annotator_config[\"type\"]:", "be parsed for annotator parameters. datasets: map of named dataset parsers that will", "key. For more information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type", "annotator_config = config[\"annotator\"] if \"type\" not in annotator_config or not annotator_config[\"type\"]: raise AssertionError(\"annotator" ]
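These fragments tile into one complete, well-documented module. The sketch below reassembles it from the overlaps; the docstring line-wrapping is mine, but the wording, the two `AssertionError` checks, and the final `import_class` dispatch all come straight from the shingles.

    """Graphical User Interface (GUI) utility module.

    This module contains various tools and utilities used to instantiate
    annotators and GUI elements.
    """

    import logging

    import thelper.utils

    logger = logging.getLogger(__name__)


    def create_key_listener(callback):
        """Returns a key press listener based on pynput.keyboard (used for mocking)."""
        import pynput.keyboard
        return pynput.keyboard.Listener(on_press=callback)


    def create_annotator(session_name, save_dir, config, datasets):
        """Instantiates a GUI annotation tool based on the type contained in the config dictionary.

        The tool type is expected to be in the configuration dictionary's `annotator`
        field, under the `type` key. For more information on the configuration, refer
        to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be
        compatible with the constructor signature of
        :class:`thelper.gui.annotators.Annotator`. The object's constructor will be
        given the full config dictionary.

        Args:
            session_name: name of the annotation session used for printing and to
                create output directories.
            save_dir: path to the session directory where annotations and other
                outputs will be saved.
            config: full configuration dictionary that will be parsed for annotator
                parameters.
            datasets: map of named dataset parsers that will provide the data to
                annotate.

        Returns:
            The fully-constructed annotator object, ready to begin annotation via
            its ``run()`` function.

        .. seealso::
            | :class:`thelper.gui.annotators.Annotator`
        """
        if "annotator" not in config or not config["annotator"]:
            raise AssertionError("config missing 'annotator' field")
        annotator_config = config["annotator"]
        if "type" not in annotator_config or not annotator_config["type"]:
            raise AssertionError("annotator config missing 'type' field")
        annotator_type = thelper.utils.import_class(annotator_config["type"])
        return annotator_type(session_name, config, save_dir, datasets)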
[]
[ "METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD =", "= ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip']", "= 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin'", "= 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET'", "['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY", "'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS", "from .base import HttpbinAction class GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY", "'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS", ".base import HttpbinAction class GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY =", "= ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['user-agent']", "GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD", "import HttpbinAction class GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers'", "METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD =", "RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['user-agent'] RESULT_KEY =", "GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD", "RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY =", "HttpbinAction class GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class", "= 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET'", "URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS =", "['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['user-agent'] RESULT_KEY", "= 'origin' class GetUserAgentAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['user-agent'] RESULT_KEY = 'user-agent'", "class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class GetUserAgentAction(HttpbinAction):", "class GetHeadersAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction):", "'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS = ['ip'] RESULT_KEY = 'origin' class", "URL_COMPONENTS = ['headers'] RESULT_KEY = 'headers' class GetIPAction(HttpbinAction): METHOD = 'GET' URL_COMPONENTS =" ]
[ "\"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\")", "in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}):", "in enumerate({\"Tam\", \"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}):", "\"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\")", "index, entry in enumerate({\"Tam\", \"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\",", "\"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for", "for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in", "index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\",", "entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\",", "\"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for", "enumerate({\"Tam\", \"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}:", "for index, entry in enumerate({\"Tam\", \"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in", "entry in enumerate({\"Tam\", \"Tim\", \"Tom\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\",", "{entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry", "enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}:", "print(f\"{index}: {entry}\") for index, entry in enumerate({\"Tam\", \"Tom\", \"Tim\"}): print(f\"{index}: {entry}\") for index," ]
[ "References ---------- <NAME>., & <NAME>. (2018). Expected policy gradients. AAAI. \"\"\" def __init__(self,", "ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible function approximation. References ----------", "\"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible function approximation. References ---------- <NAME>.,", "compatible function approximation. References ---------- <NAME>., & <NAME>. (2018). Expected policy gradients. AAAI.", "class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible function approximation. References", "of the Advantage-Actor Critic. TODO: build compatible function approximation. References ---------- <NAME>., &", "Advantage-Actor Critic. TODO: build compatible function approximation. References ---------- <NAME>., & <NAME>. (2018).", "---------- <NAME>., & <NAME>. (2018). Expected policy gradients. AAAI. \"\"\" def __init__(self, *args,", "Critic Agent.\"\"\" from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation", "of Expected-Actor Critic Agent.\"\"\" from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class", "build compatible function approximation. References ---------- <NAME>., & <NAME>. (2018). Expected policy gradients.", "& <NAME>. (2018). Expected policy gradients. AAAI. \"\"\" def __init__(self, *args, **kwargs): super().__init__(algorithm_=ExpectedActorCritic,", "from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build", "function approximation. References ---------- <NAME>., & <NAME>. (2018). Expected policy gradients. AAAI. \"\"\"", "import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible function", "<NAME>. (2018). Expected policy gradients. AAAI. \"\"\" def __init__(self, *args, **kwargs): super().__init__(algorithm_=ExpectedActorCritic, *args,", "Critic. TODO: build compatible function approximation. References ---------- <NAME>., & <NAME>. (2018). Expected", "TODO: build compatible function approximation. References ---------- <NAME>., & <NAME>. (2018). Expected policy", "approximation. References ---------- <NAME>., & <NAME>. (2018). Expected policy gradients. AAAI. \"\"\" def", "ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible function approximation.", ".actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO: build compatible", "rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor", "import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic.", "from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the", "<NAME>., & <NAME>. (2018). Expected policy gradients. AAAI. 
\"\"\" def __init__(self, *args, **kwargs):", "(2018). Expected policy gradients. AAAI. \"\"\" def __init__(self, *args, **kwargs): super().__init__(algorithm_=ExpectedActorCritic, *args, **kwargs)", "\"\"\"Implementation of Expected-Actor Critic Agent.\"\"\" from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent", "Expected-Actor Critic Agent.\"\"\" from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent):", "Agent.\"\"\" from rllib.algorithms.eac import ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of", "ExpectedActorCritic from .actor_critic_agent import ActorCriticAgent class ExpectedActorCriticAgent(ActorCriticAgent): \"\"\"Implementation of the Advantage-Actor Critic. TODO:", "the Advantage-Actor Critic. TODO: build compatible function approximation. References ---------- <NAME>., & <NAME>." ]
[ "= self.net_blocks.bottleneck_v3(x, 80 , 3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "s=s[1], n=n[1], activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None) A1 = add([M0,", "2, 4, 2] s = [2, 1, 2, 2, 1, 2, 1] n", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation) x =", "= add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) A1", "x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3,", "s=s[2], n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x", "ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3],", "160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960,", "self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1] inputs = self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x =", "alpha) x = self.net_blocks.conv_block(I, 32, 3, 2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 ,", "ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7],", "32, 64, 96, 160, 320, 1280] t = [1, 1, 6, 6, 6,", "3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=2, alpha=alpha, activation=activation) x", "c=c[6], ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7],", "x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation) x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I,", "activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024,", "squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish') x", "self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1],", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha", "import AdaCos from model.blocks import NetBlock from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation,", "= NetBlock(config) def build_mpsnet_backbone(self, input_shape): c = [32, 32, 64, 64, 128] t", "[2, 2, 2, 2, 1] n = [1, 2, 2, 3, 2] activation='relu'", "= self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3], 3,", "I = Input(shape = input_shape) activation = 'relu' c = int(32 * alpha)", "s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation) x =", "1, 1, activation='softmax', norm=None) x = Reshape((n_classes,))(x) self.softmax_model = Model(inputs=I, outputs=x, name=self.model_name) def", "alpha=alpha, squeeze=True, 
activation='hard_swish') x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x)", "Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) activation", "self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha,", "e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha,", "activation=activation) x = self.net_blocks.separable_conv_block(M, c[1], 3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3,", "alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=72 , s=2, alpha=alpha,", "self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4],", "name=self.model_name) def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) x = self.net_blocks.conv_block(I,", "1, 2, 1, 1] n = [1, 1, 2, 3, 4, 3, 3,", "M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1],", "3, s[1], activation=None) A1 = add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2],", "c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None)", "if alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else: last_filters =", "[1, 1, 6, 6, 6, 6, 6, 6, 1] s = [2, 1,", "40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 ,", "t=t[4], s=s[4], n=n[4], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation)", "c[1], 3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)", "tensorflow.keras.models import Model import tensorflow.keras.backend as K class Net: def __init__(self, config): self.model_name", "= Dropout(rate=dropout)(x) x = Flatten(name=self.embedding_layer_name)(x) break_point = len(self.softmax_model.layers) + self.start_fine_tune_layer_id for layer in", "name=self.model_name) def build_softmax_model(self, n_classes): I=self.backbone.inputs x=self.backbone.outputs[0] if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1,", "s=s[2], n=n[2], activation=activation) A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None) A2 = add([A1,", "= self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None) x = Reshape((n_classes,))(x) self.softmax_model = Model(inputs=I,", "Net: def __init__(self, config): self.model_name = config.model_name self.start_fine_tune_layer_id = config.start_fine_tune_layer_id self.end_fine_tune_layer_id = config.end_fine_tune_layer_id", "alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation) x", "Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1] inputs = self.softmax_model.inputs[0] x =", "s = [2, 2, 2, 2, 1] n = 
[1, 2, 2, 3,", "= [1, 1, 5, 1, 6, 1, 2] activation='prelu' I = Input(shape =", "n=n[2], activation=activation) A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None) A2 = add([A1, M2])", "3, s[2], activation=None) A2 = add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3],", "self.net_blocks.make_divisible(c[0] * alpha, 8) x = self.net_blocks.conv_block(I, n_filters, 3, s[0], activation=activation) # (64,", "3, s[0], activation=activation) # (64, 64, 32) x = self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1],", "last_filters = c[8] x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation) x = GlobalAveragePooling2D()(x)", "= 'relu6' I = Input(shape = input_shape) n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8)", "128] t = [1, 2, 2, 3, 2] s = [2, 2, 2,", "= K.int_shape(x)[2] x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None) self.backbone = Model(inputs=I, outputs=x,", "64, 64, 128] t = [1, 2, 2, 3, 2] s = [2,", "= Reshape((n_classes,))(x) self.softmax_model = Model(inputs=I, outputs=x, name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,), name='label_input')", "e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1,", "self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha,", "n=n[4], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation) x =", "activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0):", "= self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112,", "s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation) x =", "16 , 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24", "I=self.backbone.inputs x=self.backbone.outputs[0] if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x) x =", "= self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1] inputs = self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x", "self.net_blocks.conv_block(I, 16, 3 , 2, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 16 , 3, e=16", "3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2,", "c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5],", "c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5],", "= [1, 2, 2, 3, 2] activation='relu' I = Input(shape = input_shape) M0", "A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None) A2 = add([A1, M2]) M3 =", "s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha, activation=activation) x =", "self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = 
self.net_blocks.bottleneck_v3(x,", "1, 1, 'valid', activation=activation) ks = K.int_shape(x)[2] x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid',", "16, 3 , 2, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 ,", "alpha=alpha, activation=activation) if alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else:", "self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha,", "= self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape,", "else: last_filters = c[8] x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation) x =", "self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None) A2 = add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2, c=c[3],", "x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "128, 128] t = [1, 1, 2, 4, 2, 4, 2] s =", "c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4],", "3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x", "Flatten(name=self.embedding_layer_name)(x) break_point = len(self.softmax_model.layers) + self.start_fine_tune_layer_id for layer in self.softmax_model.layers[:break_point]: layer.trainable=False outputs =", "3, s[3], activation=None) A3 = add([A2, M3]) M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4],", "name=self.model_name) def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0): c = [32, 16, 24, 32, 64, 96,", "1, 2, 2, 1, 2, 1] n = [1, 1, 5, 1, 6,", "x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) x = self.net_blocks.separable_conv_block(M, c[1], 3, s[1],", "len(self.softmax_model.layers) + self.start_fine_tune_layer_id for layer in self.softmax_model.layers[:break_point]: layer.trainable=False outputs = AdaCos(n_classes, initializer=self.net_blocks.kernel_initializer, regularizer=self.net_blocks.kernel_regularizer,", "t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8] *", "AdaCos from model.blocks import NetBlock from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten,", "n=n[6], activation=activation) x = self.net_blocks.conv_block(x, 512, 1, 1, 'valid', activation=activation) ks = K.int_shape(x)[2]", "= self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None) A1 = add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1,", "x=self.backbone.outputs[0] if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x) x = self.net_blocks.conv_block(x,", ", 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3,", "5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1,", "6, 6, 1] s = [2, 1, 2, 2, 2, 1, 2, 1,", "x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x =", ", 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5,", 
"self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5,", "self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha > 1.0: last_filters", "n=n[6], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation)", "alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x,", "3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200,", "ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4],", "activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x =", "1, padding='valid', activation=None) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_softmax_model(self, n_classes): I=self.backbone.inputs x=self.backbone.outputs[0]", "x = Dropout(rate=dropout)(x) x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None) x =", "= self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2,", "outputs=x, name=self.model_name) def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) x =", "c[0], 3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation)", "s = [2, 1, 2, 2, 1, 2, 1] n = [1, 1,", "s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None) A4 = add([A3,", "alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else: last_filters = c[8]", "config.embedding_layer_name self.dropout = config.dropout self.net_blocks = NetBlock(config) def build_mpsnet_backbone(self, input_shape): c = [32,", "3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x", "32, 64, 64, 128] t = [1, 2, 2, 3, 2] s =", "1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self,", "= config.model_name self.start_fine_tune_layer_id = config.start_fine_tune_layer_id self.end_fine_tune_layer_id = config.end_fine_tune_layer_id self.embedding_dim = config.embedding_dim self.embedding_layer_name =", "self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6],", "= self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x =", "Flatten, Dropout, add from tensorflow.keras.models import Model import tensorflow.keras.backend as K class Net:", "112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672,", "I = Input(shape = input_shape) n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8) x =", "alpha, 8) x = self.net_blocks.conv_block(I, n_filters, 3, s[0], activation=activation) # (64, 64, 32)", "s[1], activation=None) A1 = add([M0, M1]) M2 
from model.adacos import AdaCos
from model.blocks import NetBlock
from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add
# GlobalAveragePooling2D is used by the MobileNet backbones below but was
# missing from the original import list.
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K


class Net:

    def __init__(self, config):
        self.model_name = config.model_name
        self.start_fine_tune_layer_id = config.start_fine_tune_layer_id
        self.end_fine_tune_layer_id = config.end_fine_tune_layer_id
        self.embedding_dim = config.embedding_dim
        self.embedding_layer_name = config.embedding_layer_name
        self.dropout = config.dropout
        self.net_blocks = NetBlock(config)
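
    # The attributes read above are the full configuration contract of this
    # class; note that NetBlock(config) may read additional fields of its own.
    # A hedged usage sketch with an illustrative config follows the class
    # definition at the end of this module.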

    def build_mpsnet_backbone(self, input_shape):
        c = [32, 32, 64, 64, 128]
        t = [1, 2, 2, 3, 2]
        s = [2, 2, 2, 2, 1]
        n = [1, 2, 2, 3, 2]
        activation = 'relu'
        I = Input(shape=input_shape)
        # Each stage pairs an inverted-residual branch with a separable-conv
        # projection of the same input and merges the two with add().
        M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
        M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation)
        M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None)
        A1 = add([M0, M1])
        M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
        A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None)
        A2 = add([A1, M2])
        M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
        A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None)
        A3 = add([A2, M3])
        M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
        A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None)
        A4 = add([A3, M4])
        # Spatial pyramid pooling yields a fixed-length descriptor.
        M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4])
        self.backbone = Model(inputs=I, outputs=M, name=self.model_name)

    def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0):
        I = Input(shape=input_shape)
        activation = 'relu'
        # The original passed a literal 32 to the stem, so the width multiplier
        # never reached it; use the scaled value instead.
        c = int(32 * alpha)
        x = self.net_blocks.conv_block(I, c, 3, 2, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 64, 3, s=1, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation)
        # Five stride-1 blocks at 512 channels, the standard MobileNetV1 layout.
        for _ in range(5):
            x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=2, alpha=alpha, activation=activation)
        x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha, activation=activation)
        x = GlobalAveragePooling2D()(x)
        self.backbone = Model(inputs=I, outputs=x, name=self.model_name)

    def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0):
        c = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
        t = [1, 1, 6, 6, 6, 6, 6, 6, 1]
        s = [2, 1, 2, 2, 2, 1, 2, 1, 1]
        n = [1, 1, 2, 3, 4, 3, 3, 1, 1]
        activation = 'relu6'
        I = Input(shape=input_shape)
        n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8)
        x = self.net_blocks.conv_block(I, n_filters, 3, s[0], activation=activation)  # (64, 64, 32)
        x = self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation)
        if alpha > 1.0:
            last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8)
        else:
            last_filters = c[8]
        x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation)
        x = GlobalAveragePooling2D()(x)
        self.backbone = Model(inputs=I, outputs=x, name=self.model_name)

    def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0):
        I = Input(shape=input_shape)
        x = self.net_blocks.conv_block(I, 16, 3, 2, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 16, 3, e=16, s=1, alpha=alpha, squeeze=False, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 24, 3, e=64, s=2, alpha=alpha, squeeze=False, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 24, 3, e=72, s=1, alpha=alpha, squeeze=False, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 40, 5, e=72, s=2, alpha=alpha, squeeze=True, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 40, 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 40, 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
        x = self.net_blocks.bottleneck_v3(x, 80, 3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 80, 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 80, 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 80, 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
        x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
        x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish')
        x = GlobalAveragePooling2D()(x)
        self.backbone = Model(inputs=I, outputs=x, name=self.model_name)

    def build_mobilefacenet_backbone(self, input_shape, alpha=1.0):
        c = [64, 64, 64, 128, 128, 128, 128]
        t = [1, 1, 2, 4, 2, 4, 2]
        s = [2, 1, 2, 2, 1, 2, 1]
        n = [1, 1, 5, 1, 6, 1, 2]
        activation = 'prelu'
        I = Input(shape=input_shape)
        x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
        # The original passed an undefined name M here; the running tensor x is
        # the only input that makes sense at this point.
        x = self.net_blocks.separable_conv_block(x, c[1], 3, s[1], activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation)
        x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation)
        x = self.net_blocks.conv_block(x, 512, 1, 1, 'valid', activation=activation)
        ks = K.int_shape(x)[2]
        x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None)
        self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
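
    # Note on the last two blocks above: a depthwise convolution whose kernel
    # spans the whole remaining feature map (ks = spatial size) collapses it to
    # 1x1, i.e. a learned global pooling. This is MobileFaceNet's replacement
    # for the GlobalAveragePooling2D used by the other backbones.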
"alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x,", "self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation) x = self.net_blocks.conv_block(x, 512, 1, 1,", "Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0): c = [64, 64, 64, 128,", "alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False,", "s[0], activation=activation) x = self.net_blocks.separable_conv_block(M, c[1], 3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2],", "K.int_shape(x)[2] x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None) self.backbone = Model(inputs=I, outputs=x, name=self.model_name)", "ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5],", "outputs=x, name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes =", "activation = 'relu' c = int(32 * alpha) x = self.net_blocks.conv_block(I, 32, 3,", "= self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha > 1.0:", "self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40", "= self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1,", "M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3],", "squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish')", "alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish')", "n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x =", "Dropout(rate=dropout)(x) x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None) x = Reshape((n_classes,))(x) self.softmax_model", "5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120,", "= self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation) x = self.net_blocks.conv_block(x, 512, 1,", "x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3,", "3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 , 3,", "x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "1, 2, 3, 4, 3, 3, 1, 1] activation = 'relu6' I =", "K.int_shape(softmax)[-1] inputs = self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x = Dropout(rate=dropout)(x) x =", "x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x = 
self.net_blocks.inverted_residual_block(x, c=c[4],", "activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x", "x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x,", "s=s[3], n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None) A3 = add([A2,", "[1, 1, 2, 4, 2, 4, 2] s = [2, 1, 2, 2,", "model.blocks import NetBlock from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add", "Model import tensorflow.keras.backend as K class Net: def __init__(self, config): self.model_name = config.model_name", "= Reshape((1, 1, c))(x) x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid', activation=None) if(self.dropout>0):", "M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1],", "s[2], activation=None) A2 = add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3],", "input_shape): c = [32, 32, 64, 64, 128] t = [1, 2, 2,", "2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation) x =", "'valid', activation=None) if(self.dropout>0): x = Dropout(rate=dropout)(x) x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax',", "= self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x = Dropout(rate=dropout)(x) x = Flatten(name=self.embedding_layer_name)(x) break_point", "x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3,", "build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) activation = 'relu' c =", "n = [1, 2, 2, 3, 2] activation='relu' I = Input(shape = input_shape)", "self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 80", "A3 = add([A2, M3]) M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)", "2, 2, 2, 1] n = [1, 2, 2, 3, 2] activation='relu' I", "config): self.model_name = config.model_name self.start_fine_tune_layer_id = config.start_fine_tune_layer_id self.end_fine_tune_layer_id = config.end_fine_tune_layer_id self.embedding_dim = config.embedding_dim", "x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha >", "build_adacos_model(self): label = Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1] inputs =", "3, s[0], activation=activation) x = self.net_blocks.separable_conv_block(M, c[1], 3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x,", "24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24", "if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x) x = self.net_blocks.conv_block(x, self.embedding_dim,", "self.net_blocks.bottleneck_v1(x, 1024, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha,", "[2, 1, 2, 2, 1, 2, 1] n = [1, 1, 5, 1,", "3, 3, 1, 1] activation = 'relu6' I = 
Input(shape = input_shape) n_filters", "e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1,", "112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672,", "= self.net_blocks.conv_block(I, 32, 3, 2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1,", "2, 1, 1] n = [1, 1, 2, 3, 4, 3, 3, 1,", "c[1], 3, s[1], activation=None) A1 = add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3,", "break_point = len(self.softmax_model.layers) + self.start_fine_tune_layer_id for layer in self.softmax_model.layers[:break_point]: layer.trainable=False outputs = AdaCos(n_classes,", "alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True,", "8) else: last_filters = c[8] x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation) x", "2, 2, 2, 1, 2, 1, 1] n = [1, 1, 2, 3,", "3, 2] activation='relu' I = Input(shape = input_shape) M0 = self.net_blocks.conv_block(I, c[0], 3,", "= add([A2, M3]) M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3", "n=n[1], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation)", "= self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4], 3,", ", s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 ,", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation) x = self.net_blocks.conv_block(x,", "alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha, activation=activation) x = GlobalAveragePooling2D()(x)", "= self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x,", "alpha=1.0): c = [32, 16, 24, 32, 64, 96, 160, 320, 1280] t", ", 2, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 , s=1, alpha=alpha,", "if(self.dropout>0): x = Dropout(rate=dropout)(x) x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None) x", "= config.embedding_dim self.embedding_layer_name = config.embedding_layer_name self.dropout = config.dropout self.net_blocks = NetBlock(config) def build_mpsnet_backbone(self,", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation) x =", "alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')", "'valid', activation=activation) ks = K.int_shape(x)[2] x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None) self.backbone", "c=c[2], ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3],", "last_filters, 1, 1, activation=activation) x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def", "3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, 
s=1,", "ks = K.int_shape(x)[2] x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None) self.backbone = Model(inputs=I,", "= Model(inputs=I, outputs=x, name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0]", "= self.net_blocks.bottleneck_v3(x, 40 , 5, e=72 , s=2, alpha=alpha, squeeze=True, activation='relu6') x =", "squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x", "e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha,", "s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True,", "x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[6],", "= self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6') x =", "build_mobilenet_v3_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) x = self.net_blocks.conv_block(I, 16, 3", "activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 256,", "e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=64", "input_shape) M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3,", "GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0): c = [64,", "input_shape) n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8) x = self.net_blocks.conv_block(I, n_filters, 3, s[0],", "160, 320, 1280] t = [1, 1, 6, 6, 6, 6, 6, 6,", "activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6') x", "activation=None) A1 = add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2],", "32, 3, 2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation)", "= self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1], 3,", "x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x", "= input_shape) M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1],", "= Input(shape = input_shape) x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) x =", "self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None) A1 = add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2],", "name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1]", "4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I = Input(shape", "x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6') x", ", 5, e=120, 
s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 80 , 3,", "= add([A3, M4]) M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone = Model(inputs=I, outputs=M,", "self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x,", "1, c))(x) x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid', activation=None) if(self.dropout>0): x =", "1] n = [1, 1, 5, 1, 6, 1, 2] activation='prelu' I =", "1, 6, 1, 2] activation='prelu' I = Input(shape = input_shape) x = self.net_blocks.conv_block(I,", "Model(inputs=I, outputs=x, name=self.model_name) def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) x", "self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3],", "[1, 2, 2, 3, 2] activation='relu' I = Input(shape = input_shape) M0 =", "1.0: last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else: last_filters = c[8] x =", "1] activation = 'relu6' I = Input(shape = input_shape) n_filters = self.net_blocks.make_divisible(c[0] *", "= [32, 32, 64, 64, 128] t = [1, 2, 2, 3, 2]", "self.model_name = config.model_name self.start_fine_tune_layer_id = config.start_fine_tune_layer_id self.end_fine_tune_layer_id = config.end_fine_tune_layer_id self.embedding_dim = config.embedding_dim self.embedding_layer_name", "24 , 3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40", "s=2, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1, alpha=alpha,", "x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0): c", "Model(inputs=I, outputs=x, name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes", "x = Reshape((n_classes,))(x) self.softmax_model = Model(inputs=I, outputs=x, name=self.model_name) def build_adacos_model(self): label = Input(shape=(1,),", "ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8]", "= self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "= self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid', activation=None) if(self.dropout>0): x = Dropout(rate=dropout)(x) x =", "inputs = self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x = Dropout(rate=dropout)(x) x = Flatten(name=self.embedding_layer_name)(x)", "= K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x) x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1,", ", s=2, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=72 ,", "= self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) A1 = self.net_blocks.separable_conv_block(A1, c[2], 3,", "t=t[3], s=s[3], n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None) A3 =", "activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation) x = 
self.net_blocks.bottleneck_v1(x,", ", 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 ,", "x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3],", "self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1],", "s=s[2], n=n[2], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha,", "2, 4, 2, 4, 2] s = [2, 1, 2, 2, 1, 2,", "self.embedding_dim, 1, 1, 'valid', activation=None) if(self.dropout>0): x = Dropout(rate=dropout)(x) x = self.net_blocks.conv_block(x, n_classes,", "320, 1280] t = [1, 1, 6, 6, 6, 6, 6, 6, 1]", "alpha=alpha, activation=activation) x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilenet_v2_backbone(self, input_shape,", "self.softmax_model.layers[:break_point]: layer.trainable=False outputs = AdaCos(n_classes, initializer=self.net_blocks.kernel_initializer, regularizer=self.net_blocks.kernel_regularizer, name='adacos')([x, label]) self.adacos_model = Model(inputs =", "x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None) A4 = add([A3, M4]) M =", "alpha=1.0): I = Input(shape = input_shape) activation = 'relu' c = int(32 *", "= config.dropout self.net_blocks = NetBlock(config) def build_mpsnet_backbone(self, input_shape): c = [32, 32, 64,", "n=n[1], activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None) A1 = add([M0, M1])", "1024, 3, s=1, alpha=alpha, activation=activation) x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name)", "= [1, 1, 2, 3, 4, 3, 3, 1, 1] activation = 'relu6'", "Reshape, Conv2D, Activation, Flatten, Dropout, add from tensorflow.keras.models import Model import tensorflow.keras.backend as", "16, 24, 32, 64, 96, 160, 320, 1280] t = [1, 1, 6,", "= [64, 64, 64, 128, 128, 128, 128] t = [1, 1, 2,", "add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) A2 =", "s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True,", "Input(shape = input_shape) x = self.net_blocks.conv_block(I, 16, 3 , 2, activation='hard_swish') x =", "[1, 2, 2, 3, 2] s = [2, 2, 2, 2, 1] n", "2, 2, 3, 2] activation='relu' I = Input(shape = input_shape) M0 = self.net_blocks.conv_block(I,", "n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None) A3 = add([A2, M3])", "2, 3, 2] s = [2, 2, 2, 2, 1] n = [1,", "self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2],", "= 'relu' c = int(32 * alpha) x = self.net_blocks.conv_block(I, 32, 3, 2,", "256, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation)", "= self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation) x = 
self.net_blocks.inverted_residual_block(x, c=c[2],", "activation = 'relu6' I = Input(shape = input_shape) n_filters = self.net_blocks.make_divisible(c[0] * alpha,", "name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I = Input(shape = input_shape) activation = 'relu'", "6, 1] s = [2, 1, 2, 2, 2, 1, 2, 1, 1]", "* alpha) x = self.net_blocks.conv_block(I, 32, 3, 2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64", "= input_shape) n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8) x = self.net_blocks.conv_block(I, n_filters, 3,", "last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else: last_filters = c[8] x = self.net_blocks.conv_block(x,", "add([M0, M1]) M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) A1 =", "build_softmax_model(self, n_classes): I=self.backbone.inputs x=self.backbone.outputs[0] if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x)", "activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x =", "1, 1] activation = 'relu6' I = Input(shape = input_shape) n_filters = self.net_blocks.make_divisible(c[0]", "6, 6, 6, 6, 6, 6, 1] s = [2, 1, 2, 2,", "self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3,", "= input_shape) activation = 'relu' c = int(32 * alpha) x = self.net_blocks.conv_block(I,", "n=n[5], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation) x =", "A2 = add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)", "c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation) x = self.net_blocks.conv_block(x, 512, 1, 1, 'valid',", "3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation) x", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation) x =", "e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=72", "ks=3, t=t[3], s=s[3], n=n[3], activation=activation) A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None) A3", "40 , 5, e=72 , s=2, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40", "s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x =", "c[3], 3, s[3], activation=None) A3 = add([A2, M3]) M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3,", "s=s[4], n=n[4], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation) x", ", s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=72 ,", "from tensorflow.keras.models import Model import tensorflow.keras.backend as K class Net: def __init__(self, config):", "3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x", "= [2, 1, 2, 2, 2, 1, 2, 1, 1] n = [1,", "activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, 
alpha=alpha, squeeze=True, activation='hard_swish') x =", "self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I = Input(shape =", "t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2],", "s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation) x =", "x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I,", "self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name)", "alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation) if", "= self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) x = self.net_blocks.separable_conv_block(M, c[1], 3, s[1], activation=activation)", "e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=72", "alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x,", "s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=240, s=2, alpha=alpha,", "self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_softmax_model(self, n_classes):", ", 5, e=72 , s=2, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 ,", "1024, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha, activation=activation)", "e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha,", "from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add from tensorflow.keras.models import", "I = Input(shape = input_shape) x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation) x", "= Input(shape=(1,), name='label_input') softmax = self.softmax_model.outputs[0] n_classes = K.int_shape(softmax)[-1] inputs = self.softmax_model.inputs[0] x", "1, 2] activation='prelu' I = Input(shape = input_shape) x = self.net_blocks.conv_block(I, c[0], 3,", "3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5,", "24, 32, 64, 96, 160, 320, 1280] t = [1, 1, 6, 6,", "x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x,", "x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3,", "= self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x,", "3, s[0], activation=activation) M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation) M0", "= self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[6],", "e=960, s=1, alpha=alpha, 
squeeze=True, activation='hard_swish') x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish') x", "K.int_shape(x)[self.net_blocks.channel_axis] x = Reshape((1, 1, c))(x) x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid',", "M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4],", "activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128,", "2, 1, 2, 1, 1] n = [1, 1, 2, 3, 4, 3,", "s[4], activation=None) A4 = add([A3, M4]) M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone", "self.net_blocks.separable_conv_block(M, c[1], 3, s[1], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2],", "self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4],", ", 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3,", "= self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3,", "1, 5, 1, 6, 1, 2] activation='prelu' I = Input(shape = input_shape) x", "self.embedding_layer_name = config.embedding_layer_name self.dropout = config.dropout self.net_blocks = NetBlock(config) def build_mpsnet_backbone(self, input_shape): c", "s=1, alpha=alpha, squeeze=True, activation='relu6') x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha,", "x = self.net_blocks.conv_block(I, 32, 3, 2, activation=activation) x = self.net_blocks.bottleneck_v1(x, 64 , 3,", "= self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160,", "self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0): c = [64, 64,", "ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4],", "activation=activation) M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None) A1 = add([M0, M1]) M2", "2, 4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I =", "= Model(inputs=I, outputs=x, name=self.model_name) def build_softmax_model(self, n_classes): I=self.backbone.inputs x=self.backbone.outputs[0] if(len(x.shape)==2): c = K.int_shape(x)[self.net_blocks.channel_axis]", "self.net_blocks.bottleneck_v3(x, 24 , 3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6') x = self.net_blocks.bottleneck_v3(x,", "activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation) x = self.net_blocks.inverted_residual_block(x,", "self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2,", "160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.conv_block(x, 960, 1, 1,", "A4 = add([A3, M4]) M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone = Model(inputs=I,", "s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, 
activation=activation) x =", "= self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None) A3 = add([A2, M3]) M4 = self.net_blocks.inverted_residual_block(A3,", "outputs=x, name=self.model_name) def build_mobilefacenet_backbone(self, input_shape, alpha=1.0): c = [64, 64, 64, 128, 128,", "= self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=2,", "self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0): c = [32, 16,", "tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add from tensorflow.keras.models import Model", "n=n[4], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation)", "512, 3, s=2, alpha=alpha, activation=activation) x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)", "activation=activation) if alpha > 1.0: last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8) else: last_filters", "= self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x,", "self.softmax_model.inputs[0] x = self.softmax_model.layers[self.end_fine_tune_layer_id].output if(self.dropout>0): x = Dropout(rate=dropout)(x) x = Flatten(name=self.embedding_layer_name)(x) break_point =", "squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish')", "x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0): c", "# (64, 64, 32) x = self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha,", "1, 2, 4, 2, 4, 2] s = [2, 1, 2, 2, 1,", "def __init__(self, config): self.model_name = config.model_name self.start_fine_tune_layer_id = config.start_fine_tune_layer_id self.end_fine_tune_layer_id = config.end_fine_tune_layer_id self.embedding_dim", "c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None)", "ks=3, t=t[4], s=s[4], n=n[4], activation=activation) A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None) A4", "e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha,", "M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self,", "= self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None) A2 = add([A1, M2]) M3 = self.net_blocks.inverted_residual_block(A2,", "ks=3, t=t[2], s=s[2], n=n[2], activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3],", "128] t = [1, 1, 2, 4, 2, 4, 2] s = [2,", "pool_list=[1, 2, 4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0): I", "80 , 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 ,", "e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1,", "alpha, 8) else: last_filters = c[8] x = 
self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation)", "6, 6, 6, 1] s = [2, 1, 2, 2, 2, 1, 2,", "def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0): c = [32, 16, 24, 32, 64, 96, 160,", "5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish') x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1,", "c[4], 3, s[4], activation=None) A4 = add([A3, M4]) M = self.net_blocks.spp_block(A4, pool_list=[1, 2,", "2, 1] n = [1, 1, 5, 1, 6, 1, 2] activation='prelu' I", "self.net_blocks.spp_block(A4, pool_list=[1, 2, 4]) self.backbone = Model(inputs=I, outputs=M, name=self.model_name) def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0):", "960, 1, 1, activation='hard_swish') x = GlobalAveragePooling2D()(x) self.backbone = Model(inputs=I, outputs=x, name=self.model_name) def", "c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation) x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2]," ]
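# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the recovered source).
# It assumes a simple namespace is enough for `config`: Net.__init__ above reads
# exactly the six fields set here, but NetBlock(config) in model.blocks may read
# further fields that are not visible in this file. All values below (shape,
# names, layer ids, class count) are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        model_name="mobilenet_v2_embedding",  # hypothetical model name
        start_fine_tune_layer_id=-30,         # hypothetical: freeze all but the last 30 layers for AdaCos fine-tuning
        end_fine_tune_layer_id=-2,            # hypothetical: tap the embedding just below the softmax head
        embedding_dim=128,
        embedding_layer_name="embedding",
        dropout=0.5,
    )

    net = Net(config)
    net.build_mobilenet_v2_backbone(input_shape=(112, 112, 3), alpha=1.0)  # any build_*_backbone method works here
    net.build_softmax_model(n_classes=1000)  # softmax classifier head on top of the backbone
    net.build_adacos_model()                 # AdaCos metric-learning head reusing the softmax model's layers
    net.adacos_model.summary()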
[ "to print the text onto. Default: black, can be either of: red, light", "= input_back_col.lower() input_shade = input_shade.lower() # Check if running from pycharm is_running_pycharm =", "the background text colour for the strings being printed. Default Black, colours same", "Default Newline, this list must be passed for the system to work correctly", "be either of: red, light red, magenta, light magenta, yellow, light yellow, green,", "the strings being printed. Default Black, colours same as printcol shade - A", "foreground colour to use if prompt_fore_col in fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the prompt correctly # Define", "to print the user input onto. Default: black, can be either of: red,", "input_back_col.lower() input_shade = input_shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\"", "input. prompt_fore_col - The colour of the text to print the prompt text", "string to change the colour of the text being printed out. \"\"\" #", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "colorama import init, Fore, Back, Style import os def printcol(text, fore_col=None, back_col=None, shade=None,", "list of strings or a single string to use as the shade of", "back_col, shade, end): # Print the item printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending) else:", "print the text to the screen print(shade + fore_col + back_col + text,", "the next print statement runs correctly # Define values for each style and", "the prompt text in. Default: white, can be either of: red, light red,", "set the defaults. if fore_col is None: fore_col = \"white\" if back_col is", "sting. Arguments: list_to_print - A iterable list of strings or numbers to print", "normal, bright end - What character to end the print line with. By", "this allows any not # defined to be set to the default. E.G.", "import init, Fore, Back, Style import os def printcol(text, fore_col=None, back_col=None, shade=None, end=None):", "add a space for styling show_text + input_shade + input_fore_col + input_back_col return_text", "prompt_shade - The shade of the colour to use for the input prompt.", "the prompt correctly # Define values for each style and colour shades =", "None: input_back_col = \"black\" if input_shade is None: input_shade = \"normal\" # Convert", "Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which shade of colour to use for the", "License for the specific language governing permissions and limitations under the License. \"\"\"", "fore_cols: input_fore_col = fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE # Check each background colour", "printcol(use_string, \"yellow\", \"black\", \"normal\") printcol(use_string, \"yellow\", \"black\", \"bright\") printcol(use_string, \"green\", \"black\", \"dim\") printcol(use_string,", "Check the foreground colour to use if fore_col in fore_cols: fore_col = fore_cols[fore_col]", "console prints text correctly in all colours. 
Default: 'Hello World'.\"\"\" if use_string is", "attempting the iteration if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end):", "Convert the inputs into lowercase names to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col", "\"black\", \"dim\") printcol(use_string, \"green\", \"black\", \"normal\") printcol(use_string, \"green\", \"black\", \"bright\") printcol(use_string, \"cyan\", \"black\",", "end = \"\\n\" # Check the lists are of the correct length before", "normal shade on a white background, this is normal print for cmd, but", "input(show_text) # Show the text print(Style.RESET_ALL) # Reset for normal return return_text def", "- The colour of the text to print the prompt text in. Default:", "blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check the shade", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "\"white\": Back.WHITE} # Check the shade of colour to use if shade in", "len(fore_col) == len(back_col) == len(shade) == len(end): # Then print out each item", "Back.BLACK # Then print the text to the screen print(shade + fore_col +", "else: # The lists are not of all equal length so print an", "the shade of colour to use if shade in shades: shade = shades[shade]", "None strip = None init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the", "input prompt and the user input. if prompt_shade in shades: prompt_shade = shades[prompt_shade]", "Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE,", "light green, blue, light blue, cyan, light cyan, black or white prompt_back_col -", "if input_shade is None: input_shade = \"normal\" # Convert the inputs into lowercase", "# defined to be set to the default. E.G. It is possible to", "# Force the text to string and add a space for styling show_text", "as the text colour for the strings being printed. Default White, colours same", "shades[input_shade] else: input_shade = Style.NORMAL # Check each foreground colour to use if", "= \"PYCHARM_HOSTED\" in os.environ if is_running_pycharm: convert = False strip = False else:", "light red, magenta, light magenta, yellow, light yellow, green, light green, blue, light", "by applicable law or agreed to in writing, software distributed under the License", "The text to prompt the user for the desired input. prompt_fore_col - The", "and shade to use can be provided as a list or as a", "back_cols[back_col] else: back_col = Back.BLACK # Then print the text to the screen", "prompt. Default: normal, can be either of: dim, normal, bright input_fore_col - The", "None: use_string = \"Hello World\" printcol(use_string, \"red\", \"black\", \"dim\") printcol(use_string, \"red\", \"black\", \"normal\")", "fore_col = Fore.WHITE # Check the background colour to use if back_col in", "input_fore_col=None, input_back_col=None, input_shade=None): \"\"\"Returns input from a coloured input prompt. Arguments: text -", "None init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement runs correctly", "to use for testing the console prints text correctly in all colours. Default:", "the specified colour on the specified background. Arguments: text - The text to", "back_cols[input_back_col] else: input_back_col = Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='') show_text =", "being printed. 
Default Newline, this list must be passed for the system to", "format. fore_col - The colour of the text to print the text in.", "same as printcol shade - A list of strings or a single string", "fore_cols[prompt_fore_col] else: prompt_fore_col = Fore.WHITE if input_fore_col in fore_cols: input_fore_col = fore_cols[input_fore_col] else:", "printing a string in different colours onto different backgrounds. Arguments: use_string - The", "if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if input_back_col", "text to print the text in. Default: white, can be either of: red,", "input_fore_col in fore_cols: input_fore_col = fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE # Check each", "black or white input_shade - The shade of the colour to use for", "Copyright 2019 - 2020 <NAME> Licensed under the Apache License, Version 2.0 (the", "strip = False else: convert = None strip = None init(autoreset=False, convert=convert, strip=strip)", "= input_shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ", "strip=strip) # Disable autoreset to colour the prompt correctly # Define values for", "light green, blue, light blue, cyan, light cyan, black or white prompt_shade -", "back_col=None, shade=None, end=None): \"\"\"A function which prints the text in the specified colour", "Then print the text to the screen print(shade + fore_col + back_col +", "colour of the text to print the prompt text in. Default: white, can", "\"magenta\": Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light", "fore_cols[fore_col] else: fore_col = Fore.WHITE # Check the background colour to use if", "blue, cyan, light cyan, black or white input_back_col - The colour to print", "fore_col + back_col + text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A", "\"black\", \"bright\") printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\",", "= shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ", "None and then set the defaults. if fore_col is None: fore_col = \"white\"", "# Check each foreground colour to use if prompt_fore_col in fore_cols: prompt_fore_col =", "for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade, end): #", "an empty string to change the colour of the text being printed out.", "shade = \"normal\" if end is None: end = \"\\n\" # Convert the", "testing the console prints text correctly in all colours. Default: 'Hello World'.\"\"\" if", "The shade of the colour to use. Default: normal, can be either of:", "light cyan, black or white back_col - The colour to print the text", "# Then print the text to the screen print(shade + fore_col + back_col", "so print an error message in red. printcol(\"Please use lists of equal length.\")", "not # defined to be set to the default. E.G. It is possible", "\"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\",", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "printed. 
Default White, colours same as printcol back_col - A list of strings", "pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if is_running_pycharm: convert = False strip =", "white prompt_back_col - The colour to print the prompt text onto. Default: black,", "blue\": Fore.LIGHTBLUE_EX, \"cyan\": Fore.CYAN, \"light cyan\": Fore.LIGHTCYAN_EX, \"black\": Fore.BLACK} back_cols = {\"red\": Back.RED,", "iterable list of strings or numbers to print out. fore_col - A list", "colour of the text being printed out. \"\"\" # Handle the keyword arguments", "change the colour of the text being printed out. \"\"\" # Handle the", "may not use this file except in compliance with the License. You may", "shades: prompt_shade = shades[prompt_shade] else: prompt_shade = Style.NORMAL if input_shade in shades: input_shade", "\"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string, \"black\", \"white\",", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "colour to use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col =", "Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\":", "yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\":", "None: end = \"\\n\" # Check the lists are of the correct length", "\"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light", "the text to print the text in. Default: white, can be either of:", "in fore_cols: fore_col = fore_cols[fore_col] else: fore_col = Fore.WHITE # Check the background", "# Check the background colour to use if back_col in back_cols: back_col =", "work correctly \"\"\" # Check the keyword arguments are None and then set", "list or as a sting. Arguments: list_to_print - A iterable list of strings", "convert = False strip = False else: convert = None strip = None", "printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\", \"normal\") printcol(use_string,", "prompt_fore_col = \"white\" if prompt_back_col is None: prompt_back_col = \"black\" if prompt_shade is", "function which prints the text in the specified colour on the specified background.", "\"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX, \"cyan\": Fore.CYAN, \"light", "for the strings being printed. Default White, colours same as printcol back_col -", "The colour to print the user input onto. Default: black, can be either", "cyan, black or white back_col - The colour to print the text onto.", "keyword arguments are None and then set the defaults. if fore_col is None:", "of the correct length before attempting the iteration if len(list_to_print) == len(fore_col) ==", "The colour of the text to print the prompt text in. 
Default: white,", "Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE,", "to be checked fore_col = fore_col.lower() back_col = back_col.lower() shade = shade.lower() #", "# Check the shade of colour to use if shade in shades: shade", "colour to use if shade in shades: shade = shades[shade] else: shade =", "white back_col - The colour to print the text onto. Default: black, can", "printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string, \"white\", \"black\", \"normal\") printcol(use_string,", "= \"\\n\" # Convert the inputs into lowercase names to be checked fore_col", "Style.BRIGHT, \"normal\": Style.NORMAL} # When underline is available add Style.UNDERLINED fore_cols = {\"red\":", "specified background. Arguments: text - The text to print to the screen in", "and prints it out in coloured text. The colours and shade to use", "light green, blue, light blue, cyan, light cyan, black or white input_shade -", "\"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\",", "Convert the inputs into lowercase names to be checked fore_col = fore_col.lower() back_col", "prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None): \"\"\"Returns input from a coloured input prompt. Arguments:", "input_fore_col + input_back_col return_text = input(show_text) # Show the text print(Style.RESET_ALL) # Reset", "use lists of equal length.\") def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None):", "is possible to run printcol(\"Some text\") and still get some output, # This", "\"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX,", "being printed out. \"\"\" # Handle the keyword arguments so that they still", "colour for the string. Default Normal, options same as printcol end - A", "each background colour to use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else:", "input_back_col = Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='') show_text = str(text) +", "Force the text to string and add a space for styling show_text +", "back_col = back_cols[back_col] else: back_col = Back.BLACK # Then print the text to", "the strings being printed. Default Newline, this list must be passed for the", "system to work correctly \"\"\" # Check the keyword arguments are None and", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "cmd, but may be # different for other terminals. if fore_col is None:", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "passed for the system to work correctly \"\"\" # Check the keyword arguments", "colour to use for the text entered by the user. Default: normal, can", "yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX, \"cyan\":", "set to the default. E.G. 
It is possible to run printcol(\"Some text\") and", "green, light green, blue, light blue, cyan, light cyan, black or white shade", "prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col = input_fore_col.lower() input_back_col = input_back_col.lower() input_shade", "printcol back_col - A list of strings or a single string to use", "cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which shade of colour to use for", "= \"white\" if prompt_back_col is None: prompt_back_col = \"black\" if prompt_shade is None:", "if prompt_fore_col is None: prompt_fore_col = \"white\" if prompt_back_col is None: prompt_back_col =", "printcol end - A list of strings or a single string to use", "the screen print(shade + fore_col + back_col + text, end=end) def printcollist(list_to_print, fore_col=None,", "\"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\",", "to the default. E.G. It is possible to run printcol(\"Some text\") and still", "= Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='') show_text = str(text) + \"", "ending in zip(list_to_print, fore_col, back_col, shade, end): # Print the item printcol(item, fore_col=foreground,", "if input_back_col is None: input_back_col = \"black\" if input_shade is None: input_shade =", "print the prompt text onto. Default: black, can be either of: red, light", "be white text using the normal shade on a white background, this is", "light blue, cyan, light cyan, black or white prompt_shade - The shade of", "for the strings being printed. Default Black, colours same as printcol shade -", "strings being printed. Default Black, colours same as printcol shade - A list", "Default: normal, can be either of: dim, normal, bright end - What character", "then set the defaults. if fore_col is None: fore_col = \"white\" if back_col", "fore_col in fore_cols: fore_col = fore_cols[fore_col] else: fore_col = Fore.WHITE # Check the", "if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end): # Then print", "either of: dim, normal, bright input_fore_col - The colour of the text to", "\"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string, \"black\", \"white\", \"normal\")", "not use this file except in compliance with the License. You may obtain", "strip = None init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement", "\"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check the shade of colour", "as printcol shade - A list of strings or a single string to", "fore_col = fore_col.lower() back_col = back_col.lower() shade = shade.lower() # Check if running", "white input_shade - The shade of the colour to use for the text", "governing permissions and limitations under the License. \"\"\" from colorama import init, Fore,", "desired input. prompt_fore_col - The colour of the text to print the prompt", "What character to end the print line with. 
By default this is the", "end is None: end = \"\\n\" # Convert the inputs into lowercase names", "= prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col = input_fore_col.lower() input_back_col =", "= None strip = None init(autoreset=True, convert=convert, strip=strip) # Make sure the next", "light blue, cyan, light cyan, black or white input_back_col - The colour to", "prompt_fore_col in fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col = Fore.WHITE if input_fore_col in", "prompt. Arguments: text - The text to prompt the user for the desired", "of: dim, normal, bright end - What character to end the print line", "Arguments: text - The text to print to the screen in the required", "the keyword arguments are None and then set the defaults. if fore_col is", "back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if input_back_col in back_cols: input_back_col", "Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\":", "= False else: convert = None strip = None init(autoreset=False, convert=convert, strip=strip) #", "input_back_col = input_back_col.lower() input_shade = input_shade.lower() # Check if running from pycharm is_running_pycharm", "\"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\",", "\"black\" if shade is None: shade = \"normal\" if end is None: end", "the specified background. Arguments: text - The text to print to the screen", "Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX, \"cyan\": Fore.CYAN,", "the user input. if prompt_shade in shades: prompt_shade = shades[prompt_shade] else: prompt_shade =", "None: input_fore_col = \"white\" if input_back_col is None: input_back_col = \"black\" if input_shade", "shades: input_shade = shades[input_shade] else: input_shade = Style.NORMAL # Check each foreground colour", "shade = Style.NORMAL # Check the foreground colour to use if fore_col in", "use_string is None: use_string = \"Hello World\" printcol(use_string, \"red\", \"black\", \"dim\") printcol(use_string, \"red\",", "\"green\", \"black\", \"bright\") printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\",", "2.0 (the \"License\"); you may not use this file except in compliance with", "before attempting the iteration if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) ==", "input_fore_col - The colour of the text to print the user input in.", "arguments so that they still work correctly when the terminal is used, this", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "\"\"\" Copyright 2019 - 2020 <NAME> Licensed under the Apache License, Version 2.0", "Style.NORMAL if input_shade in shades: input_shade = shades[input_shade] else: input_shade = Style.NORMAL #", "\"black\" if input_shade is None: input_shade = \"normal\" # Convert the inputs into", "fore_col - The colour of the text to print the text in. Default:", "colour to use. 
Default: normal, can be either of: dim, normal, bright end", "# Check which shade of colour to use for the input prompt and", "if prompt_shade is None: prompt_shade = \"normal\" if input_fore_col is None: input_fore_col =", "shade = shades[shade] else: shade = Style.NORMAL # Check the foreground colour to", "# Reset for normal return return_text def testcolour(use_string=None): \"\"\"A function which is used", "a single string to use as the background text colour for the strings", "values for each style and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\":", "= None init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the prompt correctly", "fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col = Fore.WHITE if input_fore_col in fore_cols: input_fore_col", "This can be set to an empty string to change the colour of", "Arguments: text - The text to prompt the user for the desired input.", "text print(Style.RESET_ALL) # Reset for normal return return_text def testcolour(use_string=None): \"\"\"A function which", "light red, magenta, light Magenta, yellow, light yellow, green, light green, blue, light", "+ fore_col + back_col + text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None):", "printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string,", "is available add Style.UNDERLINED fore_cols = {\"red\": Fore.RED, \"light red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA,", "and still get some output, # This will be white text using the", "None: end = \"\\n\" # Convert the inputs into lowercase names to be", "a sting. Arguments: list_to_print - A iterable list of strings or numbers to", "shade on a white background, this is normal print for cmd, but may", "colour to use if prompt_fore_col in fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col =", "to run printcol(\"Some text\") and still get some output, # This will be", "the colour to use. 
Default: normal, can be either of: dim, normal, bright", "end=ending) else: # The lists are not of all equal length so print", "Back.WHITE} # Check which shade of colour to use for the input prompt", "None: prompt_shade = \"normal\" if input_fore_col is None: input_fore_col = \"white\" if input_back_col", "in its colour for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col,", "\"white\" if input_back_col is None: input_back_col = \"black\" if input_shade is None: input_shade", "\"\"\"A function which is used to test the colour printing of the shell", "= fore_col.lower() back_col = back_col.lower() shade = shade.lower() # Check if running from", "\"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\",", "\"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which shade of colour", "strip = False else: convert = None strip = None init(autoreset=True, convert=convert, strip=strip)", "Fore.BLACK} back_cols = {\"red\": Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX,", "light green, blue, light blue, cyan, light cyan, black or white back_col -", "Fore.WHITE # Check the background colour to use if back_col in back_cols: back_col", "text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which takes a", "input onto. Default: black, can be either of: red, light red, magenta, light", "inputs into lowercase names to be checked fore_col = fore_col.lower() back_col = back_col.lower()", "which shade of colour to use for the input prompt and the user", "back_cols: back_col = back_cols[back_col] else: back_col = Back.BLACK # Then print the text", "Fore, Back, Style import os def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function", "Default: 'Hello World'.\"\"\" if use_string is None: use_string = \"Hello World\" printcol(use_string, \"red\",", "in zip(list_to_print, fore_col, back_col, shade, end): # Print the item printcol(item, fore_col=foreground, back_col=background,", "still get some output, # This will be white text using the normal", "\"white\": Back.WHITE} # Check which shade of colour to use for the input", "newline character. This can be set to an empty string to change the", "green, blue, light blue, cyan, light cyan, black or white prompt_shade - The", "message in red. printcol(\"Please use lists of equal length.\") def inputcolour(text, prompt_fore_col=None, prompt_back_col=None,", "wrapper for the colorama module.\"\"\" \"\"\" Copyright 2019 - 2020 <NAME> Licensed under", "used to test the colour printing of the shell by printing a string", "green, light green, blue, light blue, cyan, light cyan, black or white prompt_back_col", "the colour printing of the shell by printing a string in different colours", "correct length before attempting the iteration if len(list_to_print) == len(fore_col) == len(back_col) ==", "else: input_fore_col = Fore.WHITE # Check each background colour to use if prompt_back_col", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in the specified colour on the specified background. 
Arguments: text - The text", "runs correctly # Define values for each style and colour shades = {\"dim\":", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string,", "green, blue, light blue, cyan, light cyan, black or white prompt_back_col - The", "# When underline is available add Style.UNDERLINED fore_cols = {\"red\": Fore.RED, \"light red\":", "end - A list of strings or a single string to use as", "function which is used to test the colour printing of the shell by", "Then print out each item as required in its colour for item, foreground,", "or white back_col - The colour to print the text onto. Default: black,", "end the print line with. By default this is the newline character. This", "entered by the user. Default: normal, can be either of: dim, normal, bright\"\"\"", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "Default: normal, can be either of: dim, normal, bright input_fore_col - The colour", "colour to use for the input prompt. Default: normal, can be either of:", "+ input_shade + input_fore_col + input_back_col return_text = input(show_text) # Show the text", "shades[shade] else: shade = Style.NORMAL # Check the foreground colour to use if", "this is normal print for cmd, but may be # different for other", "convert=convert, strip=strip) # Make sure the next print statement runs correctly # Define", "string. Default Normal, options same as printcol end - A list of strings", "is normal print for cmd, but may be # different for other terminals.", "input_fore_col is None: input_fore_col = \"white\" if input_back_col is None: input_back_col = \"black\"", "= \"normal\" # Convert the inputs into lowercase names to be checked prompt_fore_col", "\"PYCHARM_HOSTED\" in os.environ if is_running_pycharm: convert = False strip = False else: convert", "light green, blue, light blue, cyan, light cyan, black or white input_back_col -", "prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if input_back_col in", "\"black\", \"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\",", "strings or a single string to use as the text colour for the", "prints the text in the specified colour on the specified background. Arguments: text", "fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which takes a list and iterates through", "from colorama import init, Fore, Back, Style import os def printcol(text, fore_col=None, back_col=None,", "out in coloured text. The colours and shade to use can be provided", "# Convert the inputs into lowercase names to be checked fore_col = fore_col.lower()", "\"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX, \"cyan\": Fore.CYAN, \"light cyan\": Fore.LIGHTCYAN_EX, \"black\": Fore.BLACK} back_cols", "the user. Default: normal, can be either of: dim, normal, bright\"\"\" # Handle", "the specific language governing permissions and limitations under the License. 
\"\"\" from colorama", "input_shade in shades: input_shade = shades[input_shade] else: input_shade = Style.NORMAL # Check each", "the correct length before attempting the iteration if len(list_to_print) == len(fore_col) == len(back_col)", "+ Style.RESET_ALL # Force the text to string and add a space for", "other terminals. if fore_col is None: fore_col = \"white\" if back_col is None:", "white shade - The shade of the colour to use. Default: normal, can", "zip(list_to_print, fore_col, back_col, shade, end): # Print the item printcol(item, fore_col=foreground, back_col=background, shade=shade,", "colours same as printcol shade - A list of strings or a single", "of strings or a single string to use as the shade of the", "of the colour to use for the input prompt. Default: normal, can be", "= back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if input_back_col in back_cols: input_back_col = back_cols[input_back_col]", "each style and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL} #", "options same as printcol end - A list of strings or a single", "different backgrounds. Arguments: use_string - The string to use for testing the console", "blue, cyan, light cyan, black or white prompt_shade - The shade of the", "Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which shade of colour to", "prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col = input_fore_col.lower() input_back_col = input_back_col.lower()", "to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col", "\"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\",", "\"normal\": Style.NORMAL} # When underline is available add Style.UNDERLINED fore_cols = {\"red\": Fore.RED,", "input_back_col - The colour to print the user input onto. Default: black, can", "init, Fore, Back, Style import os def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A", "colour of the text to print the user input in. Default: white, can", "fore_cols = {\"red\": Fore.RED, \"light red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\":", "= back_col.lower() shade = shade.lower() # Check if running from pycharm is_running_pycharm =", "\"normal\" if end is None: end = \"\\n\" # Check the lists are", "= str(text) + \" \" + Style.RESET_ALL # Force the text to string", "the inputs into lowercase names to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col =", "in fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col = Fore.WHITE if input_fore_col in fore_cols:", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "shade=None, end=None): \"\"\"A Function which takes a list and iterates through the list", "\"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string, \"black\", \"white\", \"normal\") printcol(use_string, \"black\",", "are None and then set the defaults. 
if fore_col is None: fore_col =", "printing of the shell by printing a string in different colours onto different", "\"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\",", "print to the screen in the required format. fore_col - The colour of", "input in. Default: white, can be either of: red, light red, magenta, light", "printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string,", "green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX, \"cyan\": Fore.CYAN, \"light cyan\": Fore.LIGHTCYAN_EX, \"black\":", "\"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\",", "input_back_col = \"black\" if input_shade is None: input_shade = \"normal\" # Convert the", "defined to be set to the default. E.G. It is possible to run", "if fore_col is None: fore_col = \"white\" if back_col is None: back_col =", "+ prompt_fore_col + prompt_back_col, end='') show_text = str(text) + \" \" + Style.RESET_ALL", "light cyan, black or white input_shade - The shade of the colour to", "a white background, this is normal print for cmd, but may be #", "Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\":", "= Style.NORMAL # Check the foreground colour to use if fore_col in fore_cols:", "input_back_col in back_cols: input_back_col = back_cols[input_back_col] else: input_back_col = Back.BLACK print(prompt_shade + prompt_fore_col", "text being printed out. \"\"\" # Handle the keyword arguments so that they", "an error message in red. printcol(\"Please use lists of equal length.\") def inputcolour(text,", "Unless required by applicable law or agreed to in writing, software distributed under", "normal print for cmd, but may be # different for other terminals. if", "string in different colours onto different backgrounds. Arguments: use_string - The string to", "if back_col in back_cols: back_col = back_cols[back_col] else: back_col = Back.BLACK # Then", "\"bright\") printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string, \"white\", \"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\")", "back_col=None, shade=None, end=None): \"\"\"A Function which takes a list and iterates through the", "correctly in all colours. Default: 'Hello World'.\"\"\" if use_string is None: use_string =", "blue, cyan, light cyan, black or white shade - The shade of the", "are of the correct length before attempting the iteration if len(list_to_print) == len(fore_col)", "list_to_print - A iterable list of strings or numbers to print out. fore_col", "Style.RESET_ALL # Force the text to string and add a space for styling", "names to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower()", "When underline is available add Style.UNDERLINED fore_cols = {\"red\": Fore.RED, \"light red\": Fore.LIGHTRED_EX,", "# Then print out each item as required in its colour for item,", "= {\"red\": Fore.RED, \"light red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW,", "text to print the user input in. 
Default: white, can be either of:", "the screen in the required format. fore_col - The colour of the text", "printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string,", "= Back.BLACK if input_back_col in back_cols: input_back_col = back_cols[input_back_col] else: input_back_col = Back.BLACK", "character to end the print line with. By default this is the newline", "on a white background, this is normal print for cmd, but may be", "prompt_fore_col + prompt_back_col, end='') show_text = str(text) + \" \" + Style.RESET_ALL #", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "will be white text using the normal shade on a white background, this", "if end is None: end = \"\\n\" # Check the lists are of", "prompt_shade = \"normal\" if input_fore_col is None: input_fore_col = \"white\" if input_back_col is", "light cyan, black or white shade - The shade of the colour to", "for the string. Default Normal, options same as printcol end - A list", "cyan, light cyan, black or white input_back_col - The colour to print the", "colour for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade, end):", "\"\"\"Returns input from a coloured input prompt. Arguments: text - The text to", "shade=shade, end=ending) else: # The lists are not of all equal length so", "= \"normal\" if end is None: end = \"\\n\" # Convert the inputs", "checked fore_col = fore_col.lower() back_col = back_col.lower() shade = shade.lower() # Check if", "The shade of the colour to use for the input prompt. Default: normal,", "shade = shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in", "\"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\",", "can be either of: dim, normal, bright input_fore_col - The colour of the", "cyan\": Fore.LIGHTCYAN_EX, \"black\": Fore.BLACK} back_cols = {\"red\": Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA,", "defaults. if fore_col is None: fore_col = \"white\" if back_col is None: back_col", "prompt_fore_col = Fore.WHITE if input_fore_col in fore_cols: input_fore_col = fore_cols[input_fore_col] else: input_fore_col =", "- The string to use for testing the console prints text correctly in", "in writing, software distributed under the License is distributed on an \"AS IS\"", "else: back_col = Back.BLACK # Then print the text to the screen print(shade", "the user input in. 
Default: white, can be either of: red, light red,", "shade=None, end=None): \"\"\"A function which prints the text in the specified colour on", "or a single string to use as the shade of the text colour", "\"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\", \"normal\") printcol(use_string, \"yellow\", \"black\", \"bright\")", "or agreed to in writing, software distributed under the License is distributed on", "+ input_back_col return_text = input(show_text) # Show the text print(Style.RESET_ALL) # Reset for", "# Convert the inputs into lowercase names to be checked prompt_fore_col = prompt_fore_col.lower()", "\"red\", \"black\", \"dim\") printcol(use_string, \"red\", \"black\", \"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\",", "the text to string and add a space for styling show_text + input_shade", "os def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function which prints the text", "\"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\": Fore.BLUE, \"light blue\": Fore.LIGHTBLUE_EX,", "\"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\")", "print(shade + fore_col + back_col + text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None,", "Default: white, can be either of: red, light red, magenta, light Magenta, yellow,", "printcol(use_string, \"white\", \"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string,", "len(end): # Then print out each item as required in its colour for", "are not of all equal length so print an error message in red.", "green, light green, blue, light blue, cyan, light cyan, black or white input_back_col", "'Hello World'.\"\"\" if use_string is None: use_string = \"Hello World\" printcol(use_string, \"red\", \"black\",", "Show the text print(Style.RESET_ALL) # Reset for normal return return_text def testcolour(use_string=None): \"\"\"A", "else: convert = None strip = None init(autoreset=True, convert=convert, strip=strip) # Make sure", "back_col - The colour to print the text onto. Default: black, can be", "light cyan, black or white input_back_col - The colour to print the user", "and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL} # When underline", "length.\") def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None): \"\"\"Returns input from a", "printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string,", "back_col is None: back_col = \"black\" if shade is None: shade = \"normal\"", "printed. 
Default Newline, this list must be passed for the system to work", "the keyword arguments so that they still work correctly when the terminal is", "can be either of: dim, normal, bright end - What character to end", "fore_col = fore_cols[fore_col] else: fore_col = Fore.WHITE # Check the background colour to", "= None strip = None init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour", "be either of: dim, normal, bright\"\"\" # Handle None keywords if prompt_fore_col is", "\"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string, \"white\",", "background colour to use if back_col in back_cols: back_col = back_cols[back_col] else: back_col", "prints it out in coloured text. The colours and shade to use can", "if prompt_shade in shades: prompt_shade = shades[prompt_shade] else: prompt_shade = Style.NORMAL if input_shade", "red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\":", "printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\", \"normal\") printcol(use_string, \"yellow\", \"black\", \"bright\") printcol(use_string,", "\"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\",", "The shade of the colour to use for the text entered by the", "The colours and shade to use can be provided as a list or", "of colour to use for the input prompt and the user input. if", "same as printcol back_col - A list of strings or a single string", "Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN,", "None: shade = \"normal\" if end is None: end = \"\\n\" # Check", "Check the shade of colour to use if shade in shades: shade =", "Style.NORMAL # Check each foreground colour to use if prompt_fore_col in fore_cols: prompt_fore_col", "item printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending) else: # The lists are not of", "Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN,", "# Handle the keyword arguments so that they still work correctly when the", "input_shade = \"normal\" # Convert the inputs into lowercase names to be checked", "False strip = False else: convert = None strip = None init(autoreset=False, convert=convert,", "which prints the text in the specified colour on the specified background. Arguments:", "the list and prints it out in coloured text. The colours and shade", "\"cyan\": Fore.CYAN, \"light cyan\": Fore.LIGHTCYAN_EX, \"black\": Fore.BLACK} back_cols = {\"red\": Back.RED, \"light red\":", "to the screen print(shade + fore_col + back_col + text, end=end) def printcollist(list_to_print,", "text to the screen print(shade + fore_col + back_col + text, end=end) def", "light blue, cyan, light cyan, black or white input_shade - The shade of", "this file except in compliance with the License. You may obtain a copy", "you may not use this file except in compliance with the License. 
You", "The string to use for testing the console prints text correctly in all", "\"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\")", "the user input onto. Default: black, can be either of: red, light red,", "\"red\", \"black\", \"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\",", "colour to print the text onto. Default: black, can be either of: red,", "character. This can be set to an empty string to change the colour", "end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which takes a list", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "fore_col, back_col, shade, end): # Print the item printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending)", "printcol(use_string, \"red\", \"black\", \"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string,", "add Style.UNDERLINED fore_cols = {\"red\": Fore.RED, \"light red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA, \"light magenta\":", "can be set to an empty string to change the colour of the", "\"\\n\" # Check the lists are of the correct length before attempting the", "specified colour on the specified background. Arguments: text - The text to print", "printcol(use_string, \"green\", \"black\", \"bright\") printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string,", "red. printcol(\"Please use lists of equal length.\") def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None,", "# Handle None keywords if prompt_fore_col is None: prompt_fore_col = \"white\" if prompt_back_col", "in shades: prompt_shade = shades[prompt_shade] else: prompt_shade = Style.NORMAL if input_shade in shades:", "and iterates through the list and prints it out in coloured text. The", "shades: shade = shades[shade] else: shade = Style.NORMAL # Check the foreground colour", "possible to run printcol(\"Some text\") and still get some output, # This will", "shade - The shade of the colour to use. Default: normal, can be", "Back.BLACK if input_back_col in back_cols: input_back_col = back_cols[input_back_col] else: input_back_col = Back.BLACK print(prompt_shade", "shade is None: shade = \"normal\" if end is None: end = \"\\n\"", "background, this is normal print for cmd, but may be # different for", "input_shade is None: input_shade = \"normal\" # Convert the inputs into lowercase names", "background text colour for the strings being printed. Default Black, colours same as", "a single string to use as the text colour for the strings being", "the strings being printed. 
Default White, colours same as printcol back_col - A", "cyan, light cyan, black or white prompt_back_col - The colour to print the", "is None: prompt_shade = \"normal\" if input_fore_col is None: input_fore_col = \"white\" if", "# Print the item printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending) else: # The lists", "\"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\", \"normal\")", "= \"normal\" if end is None: end = \"\\n\" # Check the lists", "shade, end): # Print the item printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending) else: #", "Default White, colours same as printcol back_col - A list of strings or", "shade of the colour to use for the text entered by the user.", "\"yellow\", \"black\", \"bright\") printcol(use_string, \"green\", \"black\", \"dim\") printcol(use_string, \"green\", \"black\", \"normal\") printcol(use_string, \"green\",", "a space for styling show_text + input_shade + input_fore_col + input_back_col return_text =", "can be provided as a list or as a sting. Arguments: list_to_print -", "os.environ if is_running_pycharm: convert = False strip = False else: convert = None", "Back, Style import os def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function which", "bright\"\"\" # Handle None keywords if prompt_fore_col is None: prompt_fore_col = \"white\" if", "if input_shade in shades: input_shade = shades[input_shade] else: input_shade = Style.NORMAL # Check", "the print line with. By default this is the newline character. This can", "black or white prompt_shade - The shade of the colour to use for", "this is the newline character. This can be set to an empty string", "strip = None init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the prompt", "prompt_shade = Style.NORMAL if input_shade in shades: input_shade = shades[input_shade] else: input_shade =", "if shade in shades: shade = shades[shade] else: shade = Style.NORMAL # Check", "is None: input_shade = \"normal\" # Convert the inputs into lowercase names to", "fore_col - A list of strings or a single string to use as", "\"light red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX,", "or a single string to use as the background text colour for the", "running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if is_running_pycharm: convert = False", "text colour for the strings being printed. Default Black, colours same as printcol", "prompt correctly # Define values for each style and colour shades = {\"dim\":", "of the shell by printing a string in different colours onto different backgrounds.", "None: shade = \"normal\" if end is None: end = \"\\n\" # Convert", "black or white shade - The shade of the colour to use. Default:", "# Check the keyword arguments are None and then set the defaults. if", "= back_cols[back_col] else: back_col = Back.BLACK # Then print the text to the", "the text print(Style.RESET_ALL) # Reset for normal return return_text def testcolour(use_string=None): \"\"\"A function", "red, magenta, light magenta, yellow, light yellow, green, light green, blue, light blue,", "of the text being printed out. 
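    Example (illustrative, not part of the original docstring):
        printcol("Warning!", fore_col="yellow", back_col="black", shade="bright")
    prints bright yellow text on a black background; positional arguments such as
    printcol("Done", "green") work as well.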
\"\"\" # Handle the keyword arguments so", "len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end): # Then print out", "black, can be either of: red, light red, magenta, light magenta, yellow, light", "== len(back_col) == len(shade) == len(end): # Then print out each item as", "file except in compliance with the License. You may obtain a copy of", "be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col =", "input_back_col = back_cols[input_back_col] else: input_back_col = Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='')", "for the text entered by the user. Default: normal, can be either of:", "\"bright\") printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\")", "to be set to the default. E.G. It is possible to run printcol(\"Some", "that they still work correctly when the terminal is used, this allows any", "strings or a single string to use as the background text colour for", "correctly # Define values for each style and colour shades = {\"dim\": Style.DIM,", "for styling show_text + input_shade + input_fore_col + input_back_col return_text = input(show_text) #", "Default: normal, can be either of: dim, normal, bright\"\"\" # Handle None keywords", "blue, light blue, cyan, light cyan, black or white input_shade - The shade", "or white input_back_col - The colour to print the user input onto. Default:", "as printcol end - A list of strings or a single string to", "separator between the strings being printed. Default Newline, this list must be passed", "use for the text entered by the user. Default: normal, can be either", "= Fore.WHITE if input_fore_col in fore_cols: input_fore_col = fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE", "+ back_col + text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function", "= \"white\" if back_col is None: back_col = \"black\" if shade is None:", "the text onto. Default: black, can be either of: red, light red, magenta,", "\"\"\"A wrapper for the colorama module.\"\"\" \"\"\" Copyright 2019 - 2020 <NAME> Licensed", "be either of: dim, normal, bright input_fore_col - The colour of the text", "iteration if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end): # Then", "backgrounds. Arguments: use_string - The string to use for testing the console prints", "= None init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement runs", "colour for the strings being printed. Default White, colours same as printcol back_col", "\"\"\" from colorama import init, Fore, Back, Style import os def printcol(text, fore_col=None,", "A list of strings or a single string to use as the text", "takes a list and iterates through the list and prints it out in", "strings being printed. Default Newline, this list must be passed for the system", "a string in different colours onto different backgrounds. 
Arguments: use_string - The string", "# Show the text print(Style.RESET_ALL) # Reset for normal return return_text def testcolour(use_string=None):", "list must be passed for the system to work correctly \"\"\" # Check", "can be either of: red, light red, magenta, light Magenta, yellow, light yellow,", "so that they still work correctly when the terminal is used, this allows", "Default: black, can be either of: red, light red, magenta, light magenta, yellow,", "statement runs correctly # Define values for each style and colour shades =", "<NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "out. \"\"\" # Handle the keyword arguments so that they still work correctly", "coloured input prompt. Arguments: text - The text to prompt the user for", "False else: convert = None strip = None init(autoreset=False, convert=convert, strip=strip) # Disable", "law or agreed to in writing, software distributed under the License is distributed", "\"normal\") printcol(use_string, \"yellow\", \"black\", \"bright\") printcol(use_string, \"green\", \"black\", \"dim\") printcol(use_string, \"green\", \"black\", \"normal\")", "if shade is None: shade = \"normal\" if end is None: end =", "red, magenta, light Magenta, yellow, light yellow, green, light green, blue, light blue,", "- The shade of the colour to use. Default: normal, can be either", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "\"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\", \"dim\")", "the iteration if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end): #", "input_shade = input_shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in", "all equal length so print an error message in red. printcol(\"Please use lists", "\"normal\" # Convert the inputs into lowercase names to be checked prompt_fore_col =", "Back.YELLOW, \"light yellow\": Back.LIGHTYELLOW_EX, \"green\": Back.GREEN, \"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light blue\":", "\"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\", \"black\",", "input_shade - The shade of the colour to use for the text entered", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "for the specific language governing permissions and limitations under the License. \"\"\" from", "print statement runs correctly # Define values for each style and colour shades", "required in its colour for item, foreground, background, shade, ending in zip(list_to_print, fore_col,", "normal, bright\"\"\" # Handle None keywords if prompt_fore_col is None: prompt_fore_col = \"white\"", "for each style and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL}", "\"normal\" if end is None: end = \"\\n\" # Convert the inputs into", "return_text = input(show_text) # Show the text print(Style.RESET_ALL) # Reset for normal return", "The lists are not of all equal length so print an error message", "printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending) else: # The lists are not of all", "work correctly when the terminal is used, this allows any not # defined", "to print the user input in. Default: white, can be either of: red,", "or implied. 
See the License for the specific language governing permissions and limitations", "init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement runs correctly #", "if prompt_back_col is None: prompt_back_col = \"black\" if prompt_shade is None: prompt_shade =", "white prompt_shade - The shade of the colour to use for the input", "to an empty string to change the colour of the text being printed", "= {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL} # When underline is available add", "is None: prompt_back_col = \"black\" if prompt_shade is None: prompt_shade = \"normal\" if", "the shade of the text colour for the string. Default Normal, options same", "shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if", "default. E.G. It is possible to run printcol(\"Some text\") and still get some", "else: convert = None strip = None init(autoreset=False, convert=convert, strip=strip) # Disable autoreset", "print out each item as required in its colour for item, foreground, background,", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "= \"normal\" if input_fore_col is None: input_fore_col = \"white\" if input_back_col is None:", "green, blue, light blue, cyan, light cyan, black or white back_col - The", "through the list and prints it out in coloured text. The colours and", "to work correctly \"\"\" # Check the keyword arguments are None and then", "back_col = Back.BLACK # Then print the text to the screen print(shade +", "red\": Fore.LIGHTRED_EX, \"magenta\": Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\":", "of: red, light red, magenta, light magenta, yellow, light yellow, green, light green,", "text - The text to prompt the user for the desired input. prompt_fore_col", "normal, can be either of: dim, normal, bright input_fore_col - The colour of", "except in compliance with the License. You may obtain a copy of the", "\"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\") printcol(use_string, \"magenta\",", "= \"Hello World\" printcol(use_string, \"red\", \"black\", \"dim\") printcol(use_string, \"red\", \"black\", \"normal\") printcol(use_string, \"red\",", "- 2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you", "= \"black\" if shade is None: shade = \"normal\" if end is None:", "for the desired input. prompt_fore_col - The colour of the text to print", "print line with. By default this is the newline character. This can be", "end = \"\\n\" # Convert the inputs into lowercase names to be checked", "return return_text def testcolour(use_string=None): \"\"\"A function which is used to test the colour", "Define values for each style and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT,", "onto. Default: black, can be either of: red, light red, magenta, light magenta,", "prompt text onto. Default: black, can be either of: red, light red, magenta,", "be either of: dim, normal, bright end - What character to end the", "shade of colour to use for the input prompt and the user input.", "if fore_col in fore_cols: fore_col = fore_cols[fore_col] else: fore_col = Fore.WHITE # Check", "# Check each background colour to use if prompt_back_col in back_cols: prompt_back_col =", "for the input prompt and the user input. 
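    # (Editorial sketch) A typical call, with illustrative colour choices:
    #   name = inputcolour("Name?", prompt_fore_col="cyan", input_fore_col="yellow")
    # The prompt is styled first and the input styling is then applied to the
    # text the user types back.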
if prompt_shade in shades: prompt_shade", "\"normal\") printcol(use_string, \"green\", \"black\", \"bright\") printcol(use_string, \"cyan\", \"black\", \"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\")", "of the text to print the text in. Default: white, can be either", "printcol(\"Some text\") and still get some output, # This will be white text", "required format. fore_col - The colour of the text to print the text", "white, can be either of: red, light red, magenta, light Magenta, yellow, light", "\"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string, \"white\", \"black\", \"normal\")", "World'.\"\"\" if use_string is None: use_string = \"Hello World\" printcol(use_string, \"red\", \"black\", \"dim\")", "express or implied. See the License for the specific language governing permissions and", "of: dim, normal, bright\"\"\" # Handle None keywords if prompt_fore_col is None: prompt_fore_col", "fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE # Check each background colour to use if", "is None: back_col = \"black\" if shade is None: shade = \"normal\" if", "list of strings or a single string to use as the background text", "inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None): \"\"\"Returns input from a coloured input", "Check the lists are of the correct length before attempting the iteration if", "back_col in back_cols: back_col = back_cols[back_col] else: back_col = Back.BLACK # Then print", "fore_col.lower() back_col = back_col.lower() shade = shade.lower() # Check if running from pycharm", "must be passed for the system to work correctly \"\"\" # Check the", "allows any not # defined to be set to the default. E.G. It", "= fore_cols[fore_col] else: fore_col = Fore.WHITE # Check the background colour to use", "use_string = \"Hello World\" printcol(use_string, \"red\", \"black\", \"dim\") printcol(use_string, \"red\", \"black\", \"normal\") printcol(use_string,", "None: input_shade = \"normal\" # Convert the inputs into lowercase names to be", "\"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which", "input_back_col=None, input_shade=None): \"\"\"Returns input from a coloured input prompt. Arguments: text - The", "This will be white text using the normal shade on a white background,", "= \"\\n\" # Check the lists are of the correct length before attempting", "each item as required in its colour for item, foreground, background, shade, ending", "the text colour for the string. Default Normal, options same as printcol end", "else: input_back_col = Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='') show_text = str(text)", "def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which takes a list and", "use_string - The string to use for testing the console prints text correctly", "\"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\",", "colour of the text to print the text in. Default: white, can be", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "printed out. 
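    Example of the end behaviour (illustrative): printcol("red ", "red", end="")
    followed by printcol("green", "green") renders both segments on one line.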
\"\"\" # Handle the keyword arguments so that they still work", "prompt_back_col = \"black\" if prompt_shade is None: prompt_shade = \"normal\" if input_fore_col is", "colours. Default: 'Hello World'.\"\"\" if use_string is None: use_string = \"Hello World\" printcol(use_string,", "when the terminal is used, this allows any not # defined to be", "user input in. Default: white, can be either of: red, light red, magenta,", "input_fore_col = \"white\" if input_back_col is None: input_back_col = \"black\" if input_shade is", "- What character to end the print line with. By default this is", "colours and shade to use can be provided as a list or as", "by the user. Default: normal, can be either of: dim, normal, bright\"\"\" #", "the License. \"\"\" from colorama import init, Fore, Back, Style import os def", "input_fore_col = fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE # Check each background colour to", "\"\"\"A function which prints the text in the specified colour on the specified", "between the strings being printed. Default Newline, this list must be passed for", "Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check", "\"white\", \"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string, \"black\",", "\"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\") printcol(use_string, \"magenta\", \"black\", \"normal\")", "\"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\", \"normal\") printcol(use_string, \"yellow\", \"black\", \"bright\") printcol(use_string, \"green\",", "prints text correctly in all colours. Default: 'Hello World'.\"\"\" if use_string is None:", "prompt_back_col is None: prompt_back_col = \"black\" if prompt_shade is None: prompt_shade = \"normal\"", "show_text + input_shade + input_fore_col + input_back_col return_text = input(show_text) # Show the", "Normal, options same as printcol end - A list of strings or a", "the lists are of the correct length before attempting the iteration if len(list_to_print)", "shade in shades: shade = shades[shade] else: shade = Style.NORMAL # Check the", "use for testing the console prints text correctly in all colours. Default: 'Hello", "use can be provided as a list or as a sting. Arguments: list_to_print", "prompt_shade in shades: prompt_shade = shades[prompt_shade] else: prompt_shade = Style.NORMAL if input_shade in", "different for other terminals. if fore_col is None: fore_col = \"white\" if back_col", "input_back_col is None: input_back_col = \"black\" if input_shade is None: input_shade = \"normal\"", "\"\\n\" # Convert the inputs into lowercase names to be checked fore_col =", "the desired input. prompt_fore_col - The colour of the text to print the", "item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade, end): # Print", "the text in the specified colour on the specified background. Arguments: text -", "into lowercase names to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower() prompt_shade", "printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string, \"white\", \"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string,", "empty string to change the colour of the text being printed out. 
\"\"\"", "\"light green\": Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX,", "in os.environ if is_running_pycharm: convert = False strip = False else: convert =", "Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL} # When underline is available add Style.UNDERLINED fore_cols", "Check each background colour to use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col]", "\"black\", \"normal\") printcol(use_string, \"magenta\", \"black\", \"bright\") printcol(use_string, \"yellow\", \"black\", \"dim\") printcol(use_string, \"yellow\", \"black\",", "printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function which prints the text in the", "which is used to test the colour printing of the shell by printing", "Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check the shade of colour to use if shade", "is None: end = \"\\n\" # Check the lists are of the correct", "names to be checked fore_col = fore_col.lower() back_col = back_col.lower() shade = shade.lower()", "print the user input onto. Default: black, can be either of: red, light", "green, blue, light blue, cyan, light cyan, black or white input_back_col - The", "\" \" + Style.RESET_ALL # Force the text to string and add a", "lowercase names to be checked fore_col = fore_col.lower() back_col = back_col.lower() shade =", "string to use for testing the console prints text correctly in all colours.", "to use. Default: normal, can be either of: dim, normal, bright end -", "# The lists are not of all equal length so print an error", "= fore_cols[input_fore_col] else: input_fore_col = Fore.WHITE # Check each background colour to use", "testcolour(use_string=None): \"\"\"A function which is used to test the colour printing of the", "\"black\", \"dim\") printcol(use_string, \"white\", \"black\", \"normal\") printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\",", "to use if shade in shades: shade = shades[shade] else: shade = Style.NORMAL", "of the text to print the prompt text in. Default: white, can be", "use if prompt_fore_col in fore_cols: prompt_fore_col = fore_cols[prompt_fore_col] else: prompt_fore_col = Fore.WHITE if", "length so print an error message in red. printcol(\"Please use lists of equal", "Back.WHITE} # Check the shade of colour to use if shade in shades:", "back_col + text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which", "+ text, end=end) def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A Function which takes", "out. fore_col - A list of strings or a single string to use", "colours same as printcol back_col - A list of strings or a single", "\"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\") printcol(use_string, \"blue\", \"black\", \"normal\")", "under the License. \"\"\" from colorama import init, Fore, Back, Style import os", "Magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan,", "printcol(\"Please use lists of equal length.\") def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None,", "to use for the text entered by the user. 
Default: normal, can be", "dim, normal, bright end - What character to end the print line with.", "module.\"\"\" \"\"\" Copyright 2019 - 2020 <NAME> Licensed under the Apache License, Version", "is None: end = \"\\n\" # Convert the inputs into lowercase names to", "either of: dim, normal, bright\"\"\" # Handle None keywords if prompt_fore_col is None:", "is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if is_running_pycharm: convert = False strip = False", "shade to use can be provided as a list or as a sting.", "to use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK", "background colour to use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col", "\"black\", \"bright\") printcol(use_string, \"green\", \"black\", \"dim\") printcol(use_string, \"green\", \"black\", \"normal\") printcol(use_string, \"green\", \"black\",", "for normal return return_text def testcolour(use_string=None): \"\"\"A function which is used to test", "to end the print line with. By default this is the newline character.", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the required format. fore_col - The colour of the text to print the", "in compliance with the License. You may obtain a copy of the License", "KIND, either express or implied. See the License for the specific language governing", "use if prompt_back_col in back_cols: prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if", "# Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if is_running_pycharm:", "either of: red, light red, magenta, light Magenta, yellow, light yellow, green, light", "dim, normal, bright input_fore_col - The colour of the text to print the", "prompt_back_col - The colour to print the prompt text onto. Default: black, can", "= False strip = False else: convert = None strip = None init(autoreset=True,", "A list of strings or a single string to use as the background", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "input_shade.lower() # Check if running from pycharm is_running_pycharm = \"PYCHARM_HOSTED\" in os.environ if", "background. Arguments: text - The text to print to the screen in the", "input. if prompt_shade in shades: prompt_shade = shades[prompt_shade] else: prompt_shade = Style.NORMAL if", "\"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check the shade of colour to use", "prompt_back_col.lower() prompt_shade = prompt_shade.lower() input_fore_col = input_fore_col.lower() input_back_col = input_back_col.lower() input_shade = input_shade.lower()", "License. \"\"\" from colorama import init, Fore, Back, Style import os def printcol(text,", "light magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light", "and the user input. if prompt_shade in shades: prompt_shade = shades[prompt_shade] else: prompt_shade", "inputs into lowercase names to be checked prompt_fore_col = prompt_fore_col.lower() prompt_back_col = prompt_back_col.lower()", "Fore.MAGENTA, \"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\":", "colour to use if fore_col in fore_cols: fore_col = fore_cols[fore_col] else: fore_col =", "any not # defined to be set to the default. E.G. 
It is possible to run printcol("Some text") and still get some output, # This will be white text using the normal shade on a black background, which is the normal print for cmd, but may be # different for other terminals.
if fore_col is None: fore_col = \"white\" if back_col is None: back_col", "else: prompt_fore_col = Fore.WHITE if input_fore_col in fore_cols: input_fore_col = fore_cols[input_fore_col] else: input_fore_col", "prompt_back_col = back_cols[prompt_back_col] else: prompt_back_col = Back.BLACK if input_back_col in back_cols: input_back_col =", "as the shade of the text colour for the string. Default Normal, options", "as printcol back_col - A list of strings or a single string to", "cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check the shade of colour to use if", "to test the colour printing of the shell by printing a string in", "string to use as the background text colour for the strings being printed.", "Back.LIGHTGREEN_EX, \"blue\": Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE}", "the text being printed out. \"\"\" # Handle the keyword arguments so that", "= {\"red\": Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW,", "\"License\"); you may not use this file except in compliance with the License.", "input from a coloured input prompt. Arguments: text - The text to prompt", "= shades[shade] else: shade = Style.NORMAL # Check the foreground colour to use", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to change the colour of the text being printed out. \"\"\" # Handle", "\"light magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX,", "string to use as the shade of the text colour for the string.", "lists are of the correct length before attempting the iteration if len(list_to_print) ==", "light blue, cyan, light cyan, black or white back_col - The colour to", "\"dim\") printcol(use_string, \"cyan\", \"black\", \"normal\") printcol(use_string, \"cyan\", \"black\", \"bright\") printcol(use_string, \"blue\", \"black\", \"dim\")", "agreed to in writing, software distributed under the License is distributed on an", "= False strip = False else: convert = None strip = None init(autoreset=False,", "to use if fore_col in fore_cols: fore_col = fore_cols[fore_col] else: fore_col = Fore.WHITE", "of strings or a single string to use as the separator between the", "prompt_back_col = Back.BLACK if input_back_col in back_cols: input_back_col = back_cols[input_back_col] else: input_back_col =", "implied. See the License for the specific language governing permissions and limitations under", "be set to an empty string to change the colour of the text", "# Check the foreground colour to use if fore_col in fore_cols: fore_col =", "def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function which prints the text in", "colour on the specified background. Arguments: text - The text to print to", "user for the desired input. prompt_fore_col - The colour of the text to", "= False else: convert = None strip = None init(autoreset=True, convert=convert, strip=strip) #", "user input onto. Default: black, can be either of: red, light red, magenta,", "- The text to prompt the user for the desired input. prompt_fore_col -", "= shades[input_shade] else: input_shade = Style.NORMAL # Check each foreground colour to use", "its colour for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade,", "is the newline character. 
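    (With end set to an empty string the cursor stays on the same line, so a
    following printcol call can continue the line in a different colour.)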
This can be set to an empty string to", "in different colours onto different backgrounds. Arguments: use_string - The string to use", "and add a space for styling show_text + input_shade + input_fore_col + input_back_col", "colours onto different backgrounds. Arguments: use_string - The string to use for testing", "on the specified background. Arguments: text - The text to print to the", "\" + Style.RESET_ALL # Force the text to string and add a space", "terminal is used, this allows any not # defined to be set to", "\"dim\") printcol(use_string, \"red\", \"black\", \"normal\") printcol(use_string, \"red\", \"black\", \"bright\") printcol(use_string, \"magenta\", \"black\", \"dim\")", "required by applicable law or agreed to in writing, software distributed under the", "list of strings or a single string to use as the text colour", "\"white\" if prompt_back_col is None: prompt_back_col = \"black\" if prompt_shade is None: prompt_shade", "# Disable autoreset to colour the prompt correctly # Define values for each", "use as the background text colour for the strings being printed. Default Black,", "arguments are None and then set the defaults. if fore_col is None: fore_col", "The colour to print the text onto. Default: black, can be either of:", "input prompt. Default: normal, can be either of: dim, normal, bright input_fore_col -", "language governing permissions and limitations under the License. \"\"\" from colorama import init,", "== len(fore_col) == len(back_col) == len(shade) == len(end): # Then print out each", "False strip = False else: convert = None strip = None init(autoreset=True, convert=convert,", "from a coloured input prompt. Arguments: text - The text to prompt the", "\"bright\") printcol(use_string, \"green\", \"black\", \"dim\") printcol(use_string, \"green\", \"black\", \"normal\") printcol(use_string, \"green\", \"black\", \"bright\")", "A iterable list of strings or numbers to print out. fore_col - A", "= \"white\" if input_back_col is None: input_back_col = \"black\" if input_shade is None:", "normal return return_text def testcolour(use_string=None): \"\"\"A function which is used to test the", "- A list of strings or a single string to use as the", "printed. Default Black, colours same as printcol shade - A list of strings", "if input_back_col in back_cols: input_back_col = back_cols[input_back_col] else: input_back_col = Back.BLACK print(prompt_shade +", "fore_col is None: fore_col = \"white\" if back_col is None: back_col = \"black\"", "== len(shade) == len(end): # Then print out each item as required in", "magenta\": Fore.LIGHTMAGENTA_EX, \"yellow\": Fore.YELLOW, \"light yellow\": Fore.LIGHTYELLOW_EX, \"green\": Fore.GREEN, \"light green\": Fore.LIGHTGREEN_EX, \"blue\":", "The text to print to the screen in the required format. fore_col -", "Back.BLACK print(prompt_shade + prompt_fore_col + prompt_back_col, end='') show_text = str(text) + \" \"", "\"\"\" # Check the keyword arguments are None and then set the defaults.", "print an error message in red. printcol(\"Please use lists of equal length.\") def", "back_cols = {\"red\": Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\":", "ANY KIND, either express or implied. 
See the License for the specific language", "2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "{\"red\": Back.RED, \"light red\": Back.LIGHTRED_EX, \"magenta\": Back.MAGENTA, \"light magenta\": Back.LIGHTMAGENTA_EX, \"yellow\": Back.YELLOW, \"light", "of strings or a single string to use as the background text colour", "\"blue\": Back.BLUE, \"light blue\": Back.LIGHTBLUE_EX, \"cyan\": Back.CYAN, \"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} #", "else: fore_col = Fore.WHITE # Check the background colour to use if back_col", "cyan, light cyan, black or white prompt_shade - The shade of the colour", "of the colour to use for the text entered by the user. Default:", "to use for the input prompt. Default: normal, can be either of: dim,", "input_fore_col.lower() input_back_col = input_back_col.lower() input_shade = input_shade.lower() # Check if running from pycharm", "= input_fore_col.lower() input_back_col = input_back_col.lower() input_shade = input_shade.lower() # Check if running from", "a coloured input prompt. Arguments: text - The text to prompt the user", "\"light cyan\": Back.LIGHTCYAN_EX, \"white\": Back.WHITE} # Check which shade of colour to use", "input_shade = Style.NORMAL # Check each foreground colour to use if prompt_fore_col in", "printcol(use_string, \"blue\", \"black\", \"normal\") printcol(use_string, \"blue\", \"black\", \"bright\") printcol(use_string, \"white\", \"black\", \"dim\") printcol(use_string,", "back_col - A list of strings or a single string to use as", "can be either of: red, light red, magenta, light magenta, yellow, light yellow,", "a list and iterates through the list and prints it out in coloured", "the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "black or white back_col - The colour to print the text onto. Default:", "(the \"License\"); you may not use this file except in compliance with the", "different colours onto different backgrounds. Arguments: use_string - The string to use for", "iterates through the list and prints it out in coloured text. The colours", "= input(show_text) # Show the text print(Style.RESET_ALL) # Reset for normal return return_text", "light blue, cyan, light cyan, black or white shade - The shade of", "style and colour shades = {\"dim\": Style.DIM, \"bright\": Style.BRIGHT, \"normal\": Style.NORMAL} # When", "Newline, this list must be passed for the system to work correctly \"\"\"", "the user for the desired input. prompt_fore_col - The colour of the text", "with. By default this is the newline character. This can be set to", "in the required format. fore_col - The colour of the text to print", "Style import os def printcol(text, fore_col=None, back_col=None, shade=None, end=None): \"\"\"A function which prints", "use as the shade of the text colour for the string. Default Normal,", "the text colour for the strings being printed. Default White, colours same as", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "blue, cyan, light cyan, black or white back_col - The colour to print", "end=None): \"\"\"A Function which takes a list and iterates through the list and", "printcol(use_string, \"white\", \"black\", \"bright\") printcol(use_string, \"black\", \"white\", \"dim\") printcol(use_string, \"black\", \"white\", \"normal\") printcol(use_string,", "user input. 
def printcol(text, fore_col=None, back_col=None, shade=None, end=None):
    """A function which prints the text in the specified colour on the specified background.

    Arguments:
    text - The text to print to the screen in the required format.
    fore_col - The colour of the text to print the text in. Default: white, can be either
        of: red, light red, magenta, light magenta, yellow, light yellow, green,
        light green, blue, light blue, cyan, light cyan, black or white
    back_col - The colour to print the text onto. Default: black, colours same as fore_col
    shade - The shade of the colour to use. Default: normal, can be either of:
        dim, normal, bright
    end - What character to end the print line with. By default this is the newline
        character. This can be set to an empty string to change the colour of the
        text being printed out.
    """
    # Handle the keyword arguments so that they still work correctly when the terminal
    # is used; this allows any not defined to be set to the default. E.g. it is possible
    # to run printcol("Some text") and still get some output: white text in the normal
    # shade on a black background, which is normal print for cmd but may differ
    # for other terminals.
    if fore_col is None:
        fore_col = "white"
    if back_col is None:
        back_col = "black"
    if shade is None:
        shade = "normal"
    if end is None:
        end = "\n"
    # Convert the inputs into lowercase names to be checked
    fore_col = fore_col.lower()
    back_col = back_col.lower()
    shade = shade.lower()
    # Check if running from pycharm
    is_running_pycharm = "PYCHARM_HOSTED" in os.environ
    if is_running_pycharm:
        convert = False
        strip = False
    else:
        convert = None
        strip = None
    init(autoreset=True, convert=convert, strip=strip)  # Make sure the next print statement runs correctly
    # Define values for each style and colour
    shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL}
    # When underline is available add Style.UNDERLINED
    fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX,
                 "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX,
                 "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX,
                 "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX,
                 "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX,
                 "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX,
                 "black": Fore.BLACK}
    back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX,
                 "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX,
                 "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX,
                 "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX,
                 "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX,
                 "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX,
                 "white": Back.WHITE}
    # Check the shade of colour to use
    if shade in shades:
        shade = shades[shade]
    else:
        shade = Style.NORMAL
    # Check the foreground colour to use
    if fore_col in fore_cols:
        fore_col = fore_cols[fore_col]
    else:
        fore_col = Fore.WHITE
    # Check the background colour to use
    if back_col in back_cols:
        back_col = back_cols[back_col]
    else:
        back_col = Back.BLACK
    # Then print the text to the screen
    print(shade + fore_col + back_col + text, end=end)
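
# A minimal usage sketch for printcol, added editorially; the messages below are
# hypothetical, and the colour, background and shade names are simply the
# dictionary keys defined above.
def demo_printcol():
    printcol("Build succeeded", "green", "black", "bright")
    printcol("Warning: low disk space", "yellow", "black", "normal")
    printcol("Error: ", "red", "black", "bright", end="")  # empty end keeps the line open
    printcol("file not found", "white", "red", "normal")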
def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None):
    """A function which takes a list, iterates through it and prints each item out in
    coloured text. The colours and shades to use can be provided as lists or as
    single strings.

    Arguments:
    list_to_print - An iterable list of strings or numbers to print out.
    fore_col - A list of strings or a single string to use as the text colour for the
        strings being printed. Default White, colours same as printcol
    back_col - A list of strings or a single string to use as the background text colour
        for the strings being printed. Default Black, colours same as printcol
    shade - A list of strings or a single string to use as the shade of the text colour
        for the string. Default Normal, options same as printcol
    end - A list of strings or a single string to use as the separator between the
        strings being printed. Default Newline.
    """
    # Check the keyword arguments are None and then set the defaults.
    if fore_col is None:
        fore_col = "white"
    if back_col is None:
        back_col = "black"
    if shade is None:
        shade = "normal"
    if end is None:
        end = "\n"
    # Expand any single string into a list of the right length, so that single values
    # and per-item lists can both be zipped below.
    if isinstance(fore_col, str):
        fore_col = [fore_col] * len(list_to_print)
    if isinstance(back_col, str):
        back_col = [back_col] * len(list_to_print)
    if isinstance(shade, str):
        shade = [shade] * len(list_to_print)
    if isinstance(end, str):
        end = [end] * len(list_to_print)
    # Check the lists are of the correct length before attempting the iteration
    if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end):
        # Then print out each item as required in its colour
        for item, foreground, background, shading, ending in zip(list_to_print, fore_col, back_col, shade, end):
            # Print the item, cast to a string so numbers print too
            printcol(str(item), fore_col=foreground, back_col=background, shade=shading, end=ending)
    else:
        # The lists are not all of equal length, so print an error message in red.
        printcol("Please use lists of equal length.", fore_col="red")
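
# A hedged sketch of per-item colouring with printcollist, added editorially:
# equal-length lists pick a colour, shade and line ending for each item. The
# demo_printcollist name and the status strings are illustrative only.
def demo_printcollist():
    items = ["PASS", "WARN", "FAIL"]
    fores = ["green", "yellow", "red"]
    backs = ["black", "black", "black"]
    shades = ["normal", "normal", "bright"]
    ends = ["\n", "\n", "\n"]
    printcollist(items, fore_col=fores, back_col=backs, shade=shades, end=ends)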
def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None,
                input_fore_col=None, input_back_col=None, input_shade=None):
    """Returns input from a coloured input prompt.

    Arguments:
    text - The text to prompt the user for the desired input.
    prompt_fore_col - The colour of the text to print the prompt text in. Default: white,
        can be either of: red, light red, magenta, light magenta, yellow, light yellow,
        green, light green, blue, light blue, cyan, light cyan, black or white
    prompt_back_col - The colour to print the prompt text onto. Default: black, colours
        same as prompt_fore_col
    prompt_shade - The shade of the colour to use for the input prompt. Default: normal,
        can be either of: dim, normal, bright
    input_fore_col - The colour of the text to print the user input in. Default: white,
        colours same as prompt_fore_col
    input_back_col - The colour to print the user input onto. Default: black, colours
        same as prompt_fore_col
    input_shade - The shade of the colour to use for the text entered by the user.
        Default: normal, can be either of: dim, normal, bright
    """
    # Handle None keywords
    if prompt_fore_col is None:
        prompt_fore_col = "white"
    if prompt_back_col is None:
        prompt_back_col = "black"
    if prompt_shade is None:
        prompt_shade = "normal"
    if input_fore_col is None:
        input_fore_col = "white"
    if input_back_col is None:
        input_back_col = "black"
    if input_shade is None:
        input_shade = "normal"
    # Convert the inputs into lowercase names to be checked
    prompt_fore_col = prompt_fore_col.lower()
    prompt_back_col = prompt_back_col.lower()
    prompt_shade = prompt_shade.lower()
    input_fore_col = input_fore_col.lower()
    input_back_col = input_back_col.lower()
    input_shade = input_shade.lower()
    # Check if running from pycharm
    is_running_pycharm = "PYCHARM_HOSTED" in os.environ
    if is_running_pycharm:
        convert = False
        strip = False
    else:
        convert = None
        strip = None
    init(autoreset=False, convert=convert, strip=strip)  # Disable autoreset to colour the prompt correctly
    # Define values for each style and colour
    shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL}
    fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX,
                 "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX,
                 "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX,
                 "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX,
                 "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX,
                 "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX,
                 "black": Fore.BLACK}
    back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX,
                 "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX,
                 "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX,
                 "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX,
                 "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX,
                 "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX,
                 "white": Back.WHITE}
    # Check which shade of colour to use for the input prompt and the user input.
    if prompt_shade in shades:
        prompt_shade = shades[prompt_shade]
    else:
        prompt_shade = Style.NORMAL
    if input_shade in shades:
        input_shade = shades[input_shade]
    else:
        input_shade = Style.NORMAL
    # Check each foreground colour to use
    if prompt_fore_col in fore_cols:
        prompt_fore_col = fore_cols[prompt_fore_col]
    else:
        prompt_fore_col = Fore.WHITE
    if input_fore_col in fore_cols:
        input_fore_col = fore_cols[input_fore_col]
    else:
        input_fore_col = Fore.WHITE
    # Check each background colour to use
    if prompt_back_col in back_cols:
        prompt_back_col = back_cols[prompt_back_col]
    else:
        prompt_back_col = Back.BLACK
    if input_back_col in back_cols:
        input_back_col = back_cols[input_back_col]
    else:
        input_back_col = Back.BLACK
    # Style the prompt, then reset and apply the input styling after it
    print(prompt_shade + prompt_fore_col + prompt_back_col, end='')
    show_text = str(text) + " " + Style.RESET_ALL  # Force the text to string and add a space for styling
    show_text += input_shade + input_fore_col + input_back_col
    return_text = input(show_text)  # Show the text and collect the input
    print(Style.RESET_ALL)  # Reset for normal printing
    return return_text
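
# An illustrative use of inputcolour, added editorially: the prompt is drawn in
# one style and the echoed keystrokes in another. The demo name and the prompt
# text are assumptions, not part of the recovered source.
def demo_inputcolour():
    name = inputcolour("What is your name?", prompt_fore_col="cyan",
                       input_fore_col="yellow", input_shade="bright")
    printcol("Hello " + name, "green")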
def testcolour(use_string=None):
    """A function which is used to test the colour printing of the shell by printing
    a string in different colours onto different backgrounds.

    Arguments:
    use_string - The string to use for testing the console prints text correctly in
        all colours. Default: 'Hello World'."""
    if use_string is None:
        use_string = "Hello World"
    # Each foreground colour is printed on black in all three shades, then black is
    # printed on white, covering the full matrix of supported combinations.
    for colour in ("red", "magenta", "yellow", "green", "cyan", "blue", "white"):
        for shade in ("dim", "normal", "bright"):
            printcol(use_string, colour, "black", shade)
    for shade in ("dim", "normal", "bright"):
        printcol(use_string, "black", "white", shade)
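
# The recovered fragments do not show an entry point for this module; a plausible
# one (an editorial assumption) simply runs the colour test when the file is
# executed directly.
if __name__ == "__main__":
    testcolour()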
[ "code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space in the code is not", "+ encrypt(text)) elif(choice == \"d\"): text = input(\"What would you like to decrypt?", "space and each word by seven spaces\\n\") choice = input(\"Would you like to", "of the list, add a space to the # decoded message and remove", "by a space word += morse_dict.get(letter) + \" \" else: # each word", "retrieves the morse code from a text file and cleans it up so", "the # decoded message and remove the space from the element, so it", "main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate", "= values[i] # The function encrypts the plaintext into ciphertext and returns the", "decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the", "would you like to encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \"", "you like to encrypt (e) or decrypt (d)? \") print() if(choice == \"e\"):", "list, add a space to the # decoded message and remove the space", "Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse symbol with a space and", "encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice ==", "+ text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text = input(\"What would", "word += morse_dict.get(letter) + \" \" else: # each word is seperated by", "removes the escape sequences and white spaces codes.split(\"\\n\") codes = codes.split() keys =", "replaces the 7 spaces of between the words that became 7 -1's to", "codes.split(\"\\n\") codes = codes.split() keys = [] values = [] for i in", "input(\"What would you like to encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext:", "the words that became 7 -1's to become a single space decoded =", "print(\"Welcome to the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse", "the keys and code into the values def loadMorseTable(): codes = \"\" with", "print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text = input(\"What would you like", "def main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please", "and returns the string def encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for", "or decrypt (d)? \") print() if(choice == \"e\"): text = input(\"What would you", "== \"d\"): text = input(\"What would you like to decrypt? \") print() print(\"Ciphertext:", "code from a text file and cleans it up so the letters #", "codes = \"\" with open(\"morseTable.txt\", \"r\") as code: codes = code.read() # removes", "morse_dict[keys[i]] = values[i] # The function encrypts the plaintext into ciphertext and returns", "# the space in the code is not a morse symbol, so the", "code dictionary morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]] = values[i] #", "of the key # if values not in dictionary, return -1 def values_by_keys(dictionary,", "in cipher_text: # if a space is in the elements of the list,", "each word by seven spaces\\n\") choice = input(\"Would you like to encrypt (e)", "to encrypt (e) or decrypt (d)? \") print() if(choice == \"e\"): text =", "The function decrypts the ciphertext def decrypt(cipher_text): decoded = \"\" # creates a", "letter by the space cipher_text = cipher_text.split(\" \") for code in cipher_text: #", "text = input(\"What would you like to encrypt? 
\") print() print(\"Plaintext: \" +", "and white spaces codes.split(\"\\n\") codes = codes.split() keys = [] values = []", "+= str(values_by_keys(morse_dict, code)) # the space in the code is not a morse", "\"\"\" Author: <NAME> Date: 08/12/19 Description: Morse code encryper and decryter \"\"\" #", "print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been a problem! Please try again\")", "codes.split() keys = [] values = [] for i in range(len(codes)): if(i %", "# if a space is in the elements of the list, add a", "open(\"morseTable.txt\", \"r\") as code: codes = code.read() # removes the escape sequences and", "intro def main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding and Decoder!\") print(\"CAUTION:", "with a space and each word by seven spaces\\n\") choice = input(\"Would you", "decoded += str(values_by_keys(morse_dict, code)) # the space in the code is not a", "\"e\"): text = input(\"What would you like to encrypt? \") print() print(\"Plaintext: \"", "+ text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been a problem! Please", "= codes.split() keys = [] values = [] for i in range(len(codes)): if(i", "# each letter is separated by a space word += morse_dict.get(letter) + \"", "# The function encrypts the plaintext into ciphertext and returns the string def", "you like to encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" +", "by inputting a value of the key # if values not in dictionary,", "and decryter \"\"\" # The function retrieves the morse code from a text", "ciphertext and returns the string def encrypt(plain_text): word = \"\" plain_text = plain_text.upper()", "escape sequences and white spaces codes.split(\"\\n\") codes = codes.split() keys = [] values", "is in the elements of the list, add a space to the #", "= input(\"What would you like to decrypt? \") print() print(\"Ciphertext: \" + text)", "the values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as code: codes", "into ciphertext and returns the string def encrypt(plain_text): word = \"\" plain_text =", "word += \" \" return word # The function returns the key by", "and remove the space from the element, so it can be identified if", "code is not a morse symbol, so the values_by_keys method returns -1 #", "up so the letters # can be stored in the keys and code", "stored in the keys and code into the values def loadMorseTable(): codes =", "for i in range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts the plaintext", "codes = code.read() # removes the escape sequences and white spaces codes.split(\"\\n\") codes", "in the elements of the list, add a space to the # decoded", "loadMorseTable() print(\"Welcome to the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each", "values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as code: codes =", "\") for code in cipher_text: # if a space is in the elements", "words that became 7 -1's to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\"", "in dictionary.items(): if(value == values): return keys return -1 # The function decrypts", "in the keys and code into the values def loadMorseTable(): codes = \"\"", "\") print() if(choice == \"e\"): text = input(\"What would you like to encrypt?", "print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been a", "== \"e\"): text = input(\"What would you like to encrypt? 
\") print() print(\"Plaintext:", "= [] values = [] for i in range(len(codes)): if(i % 2 ==", "codes = codes.split() keys = [] values = [] for i in range(len(codes)):", "if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code", "became 7 -1's to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return", "values not in dictionary, return -1 def values_by_keys(dictionary, value): for keys, values in", "spaces codes.split(\"\\n\") codes = codes.split() keys = [] values = [] for i", "each morse symbol with a space and each word by seven spaces\\n\") choice", "seperate each morse symbol with a space and each word by seven spaces\\n\")", "a value of the key # if values not in dictionary, return -1", "a morse symbol, so the values_by_keys method returns -1 # therefore replaces the", "word by seven spaces\\n\") choice = input(\"Would you like to encrypt (e) or", "\") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have", "separated by a space word += morse_dict.get(letter) + \" \" else: # each", "the space cipher_text = cipher_text.split(\" \") for code in cipher_text: # if a", "The function encrypts the plaintext into ciphertext and returns the string def encrypt(plain_text):", "The function returns the key by inputting a value of the key #", "space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main(): loadMorseTable() print(\"Welcome", "# removes the escape sequences and white spaces codes.split(\"\\n\") codes = codes.split() keys", "space word += morse_dict.get(letter) + \" \" else: # each word is seperated", "\"+ decrypt(text)) else: print(\"There must have been a problem! Please try again\") if", "from a text file and cleans it up so the letters # can", "a list by seperating each letter by the space cipher_text = cipher_text.split(\" \")", "for letter in plain_text: if(letter in morse_dict): # each letter is separated by", "a text file and cleans it up so the letters # can be", "encryper and decryter \"\"\" # The function retrieves the morse code from a", "= {} for i in range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts", "to the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse symbol", "not a morse symbol, so the values_by_keys method returns -1 # therefore replaces", "encrypt (e) or decrypt (d)? \") print() if(choice == \"e\"): text = input(\"What", "Decoder!\") print(\"CAUTION: Please seperate each morse symbol with a space and each word", "like to encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text))", "values_by_keys method returns -1 # therefore replaces the 7 spaces of between the", "remove the space from the element, so it can be identified if \"", "\"d\"): text = input(\"What would you like to decrypt? \") print() print(\"Ciphertext: \"", "returns the string def encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for letter", "for i in range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) #", "like to encrypt (e) or decrypt (d)? 
\") print() if(choice == \"e\"): text", "seven spaces\\n\") choice = input(\"Would you like to encrypt (e) or decrypt (d)?", "a space and each word by seven spaces\\n\") choice = input(\"Would you like", "returns -1 # therefore replaces the 7 spaces of between the words that", "\" \" in code: decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded +=", "decoded # intro def main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding and", "be identified if \" \" in code: decoded += \" \" + values_by_keys(morse_dict,", "decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main(): loadMorseTable() print(\"Welcome to", "as code: codes = code.read() # removes the escape sequences and white spaces", "like to decrypt? \") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else:", "decrypt (d)? \") print() if(choice == \"e\"): text = input(\"What would you like", "in morse_dict): # each letter is separated by a space word += morse_dict.get(letter)", "the ciphertext def decrypt(cipher_text): decoded = \"\" # creates a list by seperating", "the letters # can be stored in the keys and code into the", "Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse symbol with a", "# therefore replaces the 7 spaces of between the words that became 7", "the key by inputting a value of the key # if values not", "code)) # the space in the code is not a morse symbol, so", "input(\"What would you like to decrypt? \") print() print(\"Ciphertext: \" + text) print(\"Plaintext:", "decoded message and remove the space from the element, so it can be", "i in range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts the plaintext into", "the code is not a morse symbol, so the values_by_keys method returns -1", "the list, add a space to the # decoded message and remove the", "method returns -1 # therefore replaces the 7 spaces of between the words", "i in range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates", "7 spaces word += \" \" return word # The function returns the", "(d)? \") print() if(choice == \"e\"): text = input(\"What would you like to", "values = [] for i in range(len(codes)): if(i % 2 == 0): keys.append(codes[i])", "plain_text = plain_text.upper() for letter in plain_text: if(letter in morse_dict): # each letter", "identified if \" \" in code: decoded += \" \" + values_by_keys(morse_dict, code.strip())", "loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as code: codes = code.read() #", "# The function retrieves the morse code from a text file and cleans", "word # The function returns the key by inputting a value of the", "return -1 def values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value == values):", "choice = input(\"Would you like to encrypt (e) or decrypt (d)? 
\") print()", "= \"\" with open(\"morseTable.txt\", \"r\") as code: codes = code.read() # removes the", "space from the element, so it can be identified if \" \" in", "in plain_text: if(letter in morse_dict): # each letter is separated by a space", "be stored in the keys and code into the values def loadMorseTable(): codes", "cipher_text.split(\" \") for code in cipher_text: # if a space is in the", "spaces word += \" \" return word # The function returns the key", "decrypt(cipher_text): decoded = \"\" # creates a list by seperating each letter by", "\" else: # each word is seperated by 7 spaces word += \"", "between the words that became 7 -1's to become a single space decoded", "can be stored in the keys and code into the values def loadMorseTable():", "-1's to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded #", "\"\" with open(\"morseTable.txt\", \"r\") as code: codes = code.read() # removes the escape", "for code in cipher_text: # if a space is in the elements of", "decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main(): loadMorseTable() print(\"Welcome to the Morse", "def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as code: codes = code.read()", "plain_text: if(letter in morse_dict): # each letter is separated by a space word", "each letter is separated by a space word += morse_dict.get(letter) + \" \"", "to encrypt? \") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice", "each word is seperated by 7 spaces word += \" \" return word", "== values): return keys return -1 # The function decrypts the ciphertext def", "(e) or decrypt (d)? \") print() if(choice == \"e\"): text = input(\"What would", "if(letter in morse_dict): # each letter is separated by a space word +=", "# each word is seperated by 7 spaces word += \" \" return", "that became 7 -1's to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \")", "add a space to the # decoded message and remove the space from", "def encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for letter in plain_text: if(letter", "\" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text = input(\"What", "the morse code dictionary morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]] =", "symbol, so the values_by_keys method returns -1 # therefore replaces the 7 spaces", "= input(\"Would you like to encrypt (e) or decrypt (d)? \") print() if(choice", "word is seperated by 7 spaces word += \" \" return word #", "def values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value == values): return keys", "print() if(choice == \"e\"): text = input(\"What would you like to encrypt? \")", "decoded = \"\" # creates a list by seperating each letter by the", "a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main():", "cipher_text = cipher_text.split(\" \") for code in cipher_text: # if a space is", "to decrypt? 
\") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There", "it can be identified if \" \" in code: decoded += \" \"", "the element, so it can be identified if \" \" in code: decoded", "+= morse_dict.get(letter) + \" \" else: # each word is seperated by 7", "and code into the values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\")", "= \"\" # creates a list by seperating each letter by the space", "if(choice == \"e\"): text = input(\"What would you like to encrypt? \") print()", "7 -1's to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded", "0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code dictionary morse_dict = {}", "the space in the code is not a morse symbol, so the values_by_keys", "by seven spaces\\n\") choice = input(\"Would you like to encrypt (e) or decrypt", "# The function returns the key by inputting a value of the key", "# intro def main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding and Decoder!\")", "and Decoder!\") print(\"CAUTION: Please seperate each morse symbol with a space and each", "dictionary.items(): if(value == values): return keys return -1 # The function decrypts the", "in range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts the plaintext into ciphertext", "the string def encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for letter in", "-1 # The function decrypts the ciphertext def decrypt(cipher_text): decoded = \"\" #", "word = \"\" plain_text = plain_text.upper() for letter in plain_text: if(letter in morse_dict):", "can be identified if \" \" in code: decoded += \" \" +", "return decoded # intro def main(): loadMorseTable() print(\"Welcome to the Morse Code Encoding", "else: print(\"There must have been a problem! Please try again\") if __name__ ==", "by seperating each letter by the space cipher_text = cipher_text.split(\" \") for code", "the 7 spaces of between the words that became 7 -1's to become", "plain_text.upper() for letter in plain_text: if(letter in morse_dict): # each letter is separated", "keys return -1 # The function decrypts the ciphertext def decrypt(cipher_text): decoded =", "Date: 08/12/19 Description: Morse code encryper and decryter \"\"\" # The function retrieves", "to become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro", "is seperated by 7 spaces word += \" \" return word # The", "print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text", "sequences and white spaces codes.split(\"\\n\") codes = codes.split() keys = [] values =", "\" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space in the", "you like to decrypt? \") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text))", "print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text =", "values[i] # The function encrypts the plaintext into ciphertext and returns the string", "# decoded message and remove the space from the element, so it can", "returns the key by inputting a value of the key # if values", "the elements of the list, add a space to the # decoded message", "if \" \" in code: decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded", "decrypt(text)) else: print(\"There must have been a problem! 
Please try again\") if __name__", "str(values_by_keys(morse_dict, code)) # the space in the code is not a morse symbol,", "with open(\"morseTable.txt\", \"r\") as code: codes = code.read() # removes the escape sequences", "text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been a problem! Please try", "\" in code: decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict,", "if values not in dictionary, return -1 def values_by_keys(dictionary, value): for keys, values", "# creates a list by seperating each letter by the space cipher_text =", "into the values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as code:", "<NAME> Date: 08/12/19 Description: Morse code encryper and decryter \"\"\" # The function", "keys, values in dictionary.items(): if(value == values): return keys return -1 # The", "decryter \"\"\" # The function retrieves the morse code from a text file", "return word # The function returns the key by inputting a value of", "must have been a problem! Please try again\") if __name__ == \"__main__\": main()", "\") print() print(\"Plaintext: \" + text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"):", "[] for i in range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i])", "# can be stored in the keys and code into the values def", "\") return decoded # intro def main(): loadMorseTable() print(\"Welcome to the Morse Code", "7 spaces of between the words that became 7 -1's to become a", "\" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space in", "# if values not in dictionary, return -1 def values_by_keys(dictionary, value): for keys,", "so it can be identified if \" \" in code: decoded += \"", "Description: Morse code encryper and decryter \"\"\" # The function retrieves the morse", "function retrieves the morse code from a text file and cleans it up", "morse symbol with a space and each word by seven spaces\\n\") choice =", "[] values = [] for i in range(len(codes)): if(i % 2 == 0):", "input(\"Would you like to encrypt (e) or decrypt (d)? \") print() if(choice ==", "function returns the key by inputting a value of the key # if", "values): return keys return -1 # The function decrypts the ciphertext def decrypt(cipher_text):", "elif(choice == \"d\"): text = input(\"What would you like to decrypt? \") print()", "-1 def values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value == values): return", "print(\"CAUTION: Please seperate each morse symbol with a space and each word by", "= decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main(): loadMorseTable() print(\"Welcome to the", "code encryper and decryter \"\"\" # The function retrieves the morse code from", "message and remove the space from the element, so it can be identified", "encrypts the plaintext into ciphertext and returns the string def encrypt(plain_text): word =", "Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse symbol with a space", "= input(\"What would you like to encrypt? 
\") print() print(\"Plaintext: \" + text)", "so the letters # can be stored in the keys and code into", "elements of the list, add a space to the # decoded message and", "\" \" return word # The function returns the key by inputting a", "in code: decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code))", "\" return word # The function returns the key by inputting a value", "value): for keys, values in dictionary.items(): if(value == values): return keys return -1", "seperating each letter by the space cipher_text = cipher_text.split(\" \") for code in", "in the code is not a morse symbol, so the values_by_keys method returns", "the values_by_keys method returns -1 # therefore replaces the 7 spaces of between", "+= \" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space", "Please seperate each morse symbol with a space and each word by seven", "so the values_by_keys method returns -1 # therefore replaces the 7 spaces of", "\"\" plain_text = plain_text.upper() for letter in plain_text: if(letter in morse_dict): # each", "if(value == values): return keys return -1 # The function decrypts the ciphertext", "space is in the elements of the list, add a space to the", "keys and code into the values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\",", "function encrypts the plaintext into ciphertext and returns the string def encrypt(plain_text): word", "the key # if values not in dictionary, return -1 def values_by_keys(dictionary, value):", "\"\" # creates a list by seperating each letter by the space cipher_text", "in dictionary, return -1 def values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value", "function decrypts the ciphertext def decrypt(cipher_text): decoded = \"\" # creates a list", "become a single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def", "cipher_text: # if a space is in the elements of the list, add", "+= \" \" return word # The function returns the key by inputting", "cleans it up so the letters # can be stored in the keys", "dictionary morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]] = values[i] # The", "each letter by the space cipher_text = cipher_text.split(\" \") for code in cipher_text:", "else: values.append(codes[i]) # creates the morse code dictionary morse_dict = {} for i", "text = input(\"What would you like to decrypt? 
\") print() print(\"Ciphertext: \" +", "values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value == values): return keys return", "a space word += morse_dict.get(letter) + \" \" else: # each word is", "08/12/19 Description: Morse code encryper and decryter \"\"\" # The function retrieves the", "return -1 # The function decrypts the ciphertext def decrypt(cipher_text): decoded = \"\"", "by 7 spaces word += \" \" return word # The function returns", "{} for i in range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts the", "values in dictionary.items(): if(value == values): return keys return -1 # The function", "morse_dict): # each letter is separated by a space word += morse_dict.get(letter) +", "encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for letter in plain_text: if(letter in", "the morse code from a text file and cleans it up so the", "string def encrypt(plain_text): word = \"\" plain_text = plain_text.upper() for letter in plain_text:", "The function retrieves the morse code from a text file and cleans it", "letter in plain_text: if(letter in morse_dict): # each letter is separated by a", "single space decoded = decoded.replace(\"-1-1-1-1-1-1-1\",\" \") return decoded # intro def main(): loadMorseTable()", "and each word by seven spaces\\n\") choice = input(\"Would you like to encrypt", "\" \" else: # each word is seperated by 7 spaces word +=", "code.read() # removes the escape sequences and white spaces codes.split(\"\\n\") codes = codes.split()", "range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse", "morse code dictionary morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]] = values[i]", "morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]] = values[i] # The function", "letter is separated by a space word += morse_dict.get(letter) + \" \" else:", "for keys, values in dictionary.items(): if(value == values): return keys return -1 #", "morse symbol, so the values_by_keys method returns -1 # therefore replaces the 7", "it up so the letters # can be stored in the keys and", "is not a morse symbol, so the values_by_keys method returns -1 # therefore", "inputting a value of the key # if values not in dictionary, return", "value of the key # if values not in dictionary, return -1 def", "decrypt? \") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must", "from the element, so it can be identified if \" \" in code:", "code in cipher_text: # if a space is in the elements of the", "= [] for i in range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else:", "else: # each word is seperated by 7 spaces word += \" \"", "to the # decoded message and remove the space from the element, so", "spaces\\n\") choice = input(\"Would you like to encrypt (e) or decrypt (d)? 
\")", "symbol with a space and each word by seven spaces\\n\") choice = input(\"Would", "letters # can be stored in the keys and code into the values", "Morse code encryper and decryter \"\"\" # The function retrieves the morse code", "% 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code dictionary", "space cipher_text = cipher_text.split(\" \") for code in cipher_text: # if a space", "\" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been a problem!", "file and cleans it up so the letters # can be stored in", "Author: <NAME> Date: 08/12/19 Description: Morse code encryper and decryter \"\"\" # The", "a space to the # decoded message and remove the space from the", "would you like to decrypt? \") print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+", "print() print(\"Ciphertext: \" + text) print(\"Plaintext: \"+ decrypt(text)) else: print(\"There must have been", "-1 # therefore replaces the 7 spaces of between the words that became", "= cipher_text.split(\" \") for code in cipher_text: # if a space is in", "keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code dictionary morse_dict = {} for", "# creates the morse code dictionary morse_dict = {} for i in range(len(keys)):", "space in the code is not a morse symbol, so the values_by_keys method", "plaintext into ciphertext and returns the string def encrypt(plain_text): word = \"\" plain_text", "= plain_text.upper() for letter in plain_text: if(letter in morse_dict): # each letter is", "list by seperating each letter by the space cipher_text = cipher_text.split(\" \") for", "spaces of between the words that became 7 -1's to become a single", "by the space cipher_text = cipher_text.split(\" \") for code in cipher_text: # if", "if a space is in the elements of the list, add a space", "keys = [] values = [] for i in range(len(codes)): if(i % 2", "== 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code dictionary morse_dict =", "= \"\" plain_text = plain_text.upper() for letter in plain_text: if(letter in morse_dict): #", "is separated by a space word += morse_dict.get(letter) + \" \" else: #", "text file and cleans it up so the letters # can be stored", "key by inputting a value of the key # if values not in", "return keys return -1 # The function decrypts the ciphertext def decrypt(cipher_text): decoded", "the space from the element, so it can be identified if \" \"", "morse code from a text file and cleans it up so the letters", "and cleans it up so the letters # can be stored in the", "+ values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space in the code", "morse_dict.get(letter) + \" \" else: # each word is seperated by 7 spaces", "def decrypt(cipher_text): decoded = \"\" # creates a list by seperating each letter", "the escape sequences and white spaces codes.split(\"\\n\") codes = codes.split() keys = []", "= code.read() # removes the escape sequences and white spaces codes.split(\"\\n\") codes =", "creates the morse code dictionary morse_dict = {} for i in range(len(keys)): morse_dict[keys[i]]", "text) print(\"Ciphertext: \" + encrypt(text)) elif(choice == \"d\"): text = input(\"What would you", "+ \" \" else: # each word is seperated by 7 spaces word", "print(\"There must have been a problem! 
Please try again\") if __name__ == \"__main__\":", "space to the # decoded message and remove the space from the element,", "# The function decrypts the ciphertext def decrypt(cipher_text): decoded = \"\" # creates", "2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the morse code dictionary morse_dict", "creates a list by seperating each letter by the space cipher_text = cipher_text.split(\"", "code: decoded += \" \" + values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) #", "the plaintext into ciphertext and returns the string def encrypt(plain_text): word = \"\"", "<gh_stars>0 \"\"\" Author: <NAME> Date: 08/12/19 Description: Morse code encryper and decryter \"\"\"", "range(len(keys)): morse_dict[keys[i]] = values[i] # The function encrypts the plaintext into ciphertext and", "values_by_keys(morse_dict, code.strip()) decoded += str(values_by_keys(morse_dict, code)) # the space in the code is", "ciphertext def decrypt(cipher_text): decoded = \"\" # creates a list by seperating each", "code: codes = code.read() # removes the escape sequences and white spaces codes.split(\"\\n\")", "white spaces codes.split(\"\\n\") codes = codes.split() keys = [] values = [] for", "\"r\") as code: codes = code.read() # removes the escape sequences and white", "element, so it can be identified if \" \" in code: decoded +=", "\"\"\" # The function retrieves the morse code from a text file and", "therefore replaces the 7 spaces of between the words that became 7 -1's", "\" + encrypt(text)) elif(choice == \"d\"): text = input(\"What would you like to", "the Morse Code Encoding and Decoder!\") print(\"CAUTION: Please seperate each morse symbol with", "decrypts the ciphertext def decrypt(cipher_text): decoded = \"\" # creates a list by", "in range(len(codes)): if(i % 2 == 0): keys.append(codes[i]) else: values.append(codes[i]) # creates the", "values.append(codes[i]) # creates the morse code dictionary morse_dict = {} for i in", "key # if values not in dictionary, return -1 def values_by_keys(dictionary, value): for", "of between the words that became 7 -1's to become a single space", "dictionary, return -1 def values_by_keys(dictionary, value): for keys, values in dictionary.items(): if(value ==", "a space is in the elements of the list, add a space to", "encrypt(text)) elif(choice == \"d\"): text = input(\"What would you like to decrypt? \")", "seperated by 7 spaces word += \" \" return word # The function", "code into the values def loadMorseTable(): codes = \"\" with open(\"morseTable.txt\", \"r\") as", "not in dictionary, return -1 def values_by_keys(dictionary, value): for keys, values in dictionary.items():" ]
[ "2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe / (1.", "import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from", "= inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction", "# Don't mess with the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm", "ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for t, z, data_igm, data_cgm, RC_igm,", "\"igm\" and \"cgm\", respectively. To perform a single-zone calculation, simply set ``include_cgm=False`` or", "= self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial", "pf(self): if not hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf", "z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t)", "If we've made it here, we need to trick our generators a bit", "= parcel_igm.step() # Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else:", "= self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\"", "'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once we hit the first light", "\"\"\" See if we need to re-do the previous timestep. This mean: (1)", "Now, update IGM parcel t1, dt1, data_igm = self.gen_igm.next() # Pass rate coefficients", "Tuple containing the current time, redshift, and dictionaries for the IGM and CGM", "between last two steps. # Converged to desired tolerance? #self. def _stop_criteria_met(self): pass", "optical depth dynamically, we may # need to \"re-do\" this step to ensure", "1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2'] = xi snapshot['he_3'] =", "= 1e50 RC_igm = data_igm = None data_igm = {'h_1': 1.0} if self.pf['include_cgm']:", "the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2, dt2, data_cgm", "data_igm_pre.copy() dt1 = 1e50 done = True if not done: RC_igm = self.field.update_rate_coefficients(z,", "{'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: #", "this step to ensure convergence. redo = self.subcycle() if not redo: # Changing", "# Flip to descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1]", "RC_igm, RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy())", "= {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save", "MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return self.field.pops @property def grid(self): return", "time based on final redshift. 
z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels =", "redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z':", "= self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)),", "self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time", "kwargs['load_ics'] = True self.kwargs = kwargs @property def pf(self): if not hasattr(self, '_pf'):", "patch, dubbed \"igm\" and \"cgm\", respectively. To perform a single-zone calculation, simply set", "{'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. - xi, xi]} # #self.kwargs.update(new_pars)", "\"\"\" Initialize (up to two) GasParcels. \"\"\" # Reset stop time based on", "self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT", "computing the IGM optical depth dynamically, we may # need to \"re-do\" this", "mess with the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy()", "self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t)", "simulation from start to finish. Returns ------- Nothing: sets `history` attribute. \"\"\" self._insert_inits()", "self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if", "\"\"\" t = 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf", "else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR", "parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self): if not", "the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm = data_igm =", "xi snapshot['h_2'] = xi # Add helium, assuming xHeII = xHII, and xHeIII", "False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy()", ".MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def", "self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy() parcel_igm", "None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done =", "based on final redshift. 
z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = []", "continue # If we've made it here, we need to trick our generators", "is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50", "import GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from", "for a two-phase intergalactic medium. Returns ------- Tuple containing the current time, redshift,", "1e50 done = True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1'])", "# need to \"re-do\" this step to ensure convergence. redo = self.subcycle() if", "= self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm = None # Must", "squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for", "**kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default, this is a two-zone model,", "= \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), }", "over defaults, pull out the ones for this zone for key in _mpm_defs:", "If we're computing the IGM optical depth dynamically, we may # need to", "% zone]: continue kw = self.pf.copy() # Loop over defaults, pull out the", "# Sort everything by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True)", "in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] =", "if zone == 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step()", "self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho']", "to the data storage lists. 
\"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm", "dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue # If we've made", "# are distinct populations tau = [] for i in range(self.field.Npops): pass self.field.tau", "for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue #", "**self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self): if not hasattr(self,", "the ones for this zone for key in _mpm_defs: if key[0:4] != '%s_'", "self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def", "self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits,", "parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units']", "xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] = \\", "if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if self.pf['include_igm']: self.history_igm =", "t1, dt1, data_igm = self.gen_igm.next() # Pass rate coefficients off to the IGM", "# Now, update IGM parcel t1, dt1, data_igm = self.gen_igm.next() # Pass rate", "import numpy as np from .GasParcel import GasParcel from ..util import ParameterFile, ProgressBar", "last two steps. # Converged to desired tolerance? #self. def _stop_criteria_met(self): pass def", "self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm,", "self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm", "to desired tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from", "* len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the CGM", "self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions", "To perform a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics'", "grid patch and an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively.", "on final redshift. 
z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for", "self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values", "ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import", "not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid,", "self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm", "# Might need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre =", "= inits = _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise", "GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground", "\"\"\" MultiPhaseMedium.py Author: <NAME> Affiliation: University of Colorado at Boulder Created on: Mon", "def run(self): \"\"\" Run simulation from start to finish. Returns ------- Nothing: sets", "= self.tf / self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'): self._zones =", "(up to two) GasParcels. \"\"\" # Reset stop time based on final redshift.", "these parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']:", "unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t,", "_load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class", "def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\" # Reset stop time", "update IGM parcel t1, dt1, data_igm = self.gen_igm.next() # Pass rate coefficients off", "grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if not hasattr(self, '_tf'): z", "= [] for i in range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\"", "initial conditions to the data storage lists. \"\"\" if not self.pf['load_ics']: self.all_t, self.all_z,", "= {} for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i]", "data at a single snapshot. \"\"\" t = 0.0 z = self.pf['initial_redshift'] dt", "= True self.kwargs = kwargs @property def pf(self): if not hasattr(self, '_pf'): inits", "kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property def pf(self): if not hasattr(self,", "default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm", "hit the first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <=", "GasParcels. \"\"\" # Reset stop time based on final redshift. z = self.pf['initial_redshift']", "self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm =", "\"\"\" return False # Check IGM ionization state between last two steps. 
#", "the current time, redshift, and dictionaries for the IGM and CGM data at", "not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order (in redshift)", "a MultiPhaseMedium object. By default, this is a two-zone model, consisting of a", "state between last two steps. # Converged to desired tolerance? #self. def _stop_criteria_met(self):", "here, we need to trick our generators a bit # \"undo\" this time-step", "[self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the", "zone: continue # Have to rename variables so Grid class will know them", "\\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop over redshift and derive", "= kwargs @property def pf(self): if not hasattr(self, '_pf'): inits = self.inits self._pf", "self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm,", "red in enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys(): if key in", "self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else:", "provided initial conditions to the data storage lists. \"\"\" if not self.pf['load_ics']: self.all_t,", "if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] >", "= np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. -", "parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values for rate coefficients", "= MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self):", "there # are distinct populations tau = [] for i in range(self.field.Npops): pass", "= [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)", "parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm", "self._tf def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\" # Reset stop", "list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't", "True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update", "key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone ==", "= \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return #", "'_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] =", "Prepend provided initial conditions to the data storage lists. 
\"\"\" if not self.pf['load_ics']:", "not done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) #", "<filename>ares/simulations/MultiPhaseMedium.py \"\"\" MultiPhaseMedium.py Author: <NAME> Affiliation: University of Colorado at Boulder Created on:", "we hit the first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc]", "if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history", "self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history = {}", "rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50", "to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2, dt2,", "optical depth as simulation runs. \"\"\" # Recall that self.field.tau is a list", "for key in _mpm_defs: if key[0:4] != '%s_' % zone: continue # Have", "if not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field =", "new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. - xi, xi]}", "@property def grid(self): return self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones()", "the IGM optical depth. (2) \"\"\" return False # Check IGM ionization state", "\"\"\" import numpy as np from .GasParcel import GasParcel from ..util import ParameterFile,", "xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self): if not hasattr(self, '_field'): if", "MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default,", "inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z:", "if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() #", "dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need", "self.field.pops @property def grid(self): return self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'):", "import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs =", "# Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm =", "_insert_inits(self): \"\"\" Prepend provided initial conditions to the data storage lists. 
\"\"\" if", "we need to trick our generators a bit # \"undo\" this time-step t", "two-zone model, consisting of a \"bulk IGM\" grid patch and an \"HII regions\"", "rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True)", "[self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the CGM (much) if self.pf['include_cgm']: tmp", "self.gen_igm = parcel_igm.step() # Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm)", "else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically", "over redshift and derive things for the IGM for i, red in enumerate(self.all_z):", "depth. (2) \"\"\" return False # Check IGM ionization state between last two", "field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field", "dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm", "self._initialize_zones() return self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0]", "rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims,", "parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property", "tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from start to", "if not done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1'])", "be in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'],", "not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def", "= self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt / dtdz # The (potential)", "[], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']:", "dtdz # The (potential) generators need this self.field.update_redshift(z) # IGM rate coefficients if", "grid(self): return self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels", "# If we're computing the IGM optical depth dynamically, we may # need", "+ self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi snapshot['h_2'] = xi # Add helium,", "xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. 
- xi snapshot['h_2'] = xi", "self.kwargs = kwargs @property def pf(self): if not hasattr(self, '_pf'): inits = self.inits", "int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'):", "16 12:46:28 MST 2015 Description: \"\"\" import numpy as np from .GasParcel import", "Description: \"\"\" import numpy as np from .GasParcel import GasParcel from ..util import", "xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature':", "z > zf: # Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t +=", "'_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm =", "self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm,", "self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'):", "intergalactic medium. Returns ------- Tuple containing the current time, redshift, and dictionaries for", "zf: # Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z", "ensure convergence. redo = self.subcycle() if not redo: # Changing attribute! A little", "self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\", "def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else", "np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t = []", "data_igm = data_igm_pre.copy() dt1 = 1e50 done = True if not done: RC_igm", "= self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']:", "kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR']", "= self.parcel_cgm.grid.data.copy() # Evolve in time! while z > zf: # Increment time", "distinct populations tau = [] for i in range(self.field.Npops): pass self.field.tau = tau", "zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must", "= self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must be", "inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once we", "= xi # Add helium, assuming xHeII = xHII, and xHeIII << 1", "= int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not hasattr(self,", "= self.gen_igm.next() # Pass rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm)", "# If we've made it here, we need to trick our generators a", "if 2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe /", "trick our generators a bit # \"undo\" this time-step t -= dt_pre z", "dt_pre = dt * 1. 
dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep']", "@property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth as", "self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic']", "_sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']:", "data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while z > zf: # Increment", "prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm", "zf = self.pf['final_redshift'] self._parcels = [] for zone in ['igm', 'cgm']: if not", "= dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm, RC_igm,", "self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen", "need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() #", "dt * 1. dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units'])", "\"\"\" Run simulation from start to finish. Returns ------- Nothing: sets `history` attribute.", "self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt / dtdz # The (potential) generators", "CGM data at a single snapshot. \"\"\" t = 0.0 z = self.pf['initial_redshift']", "self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase intergalactic medium. Returns", "continue kw = self.pf.copy() # Loop over defaults, pull out the ones for", "Add helium, assuming xHeII = xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1']", "self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop over", "{'h_1': 1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not None: if", "step(self): \"\"\" Generator for a two-phase intergalactic medium. Returns ------- Tuple containing the", "Ti, 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars", "\"cgm\", respectively. To perform a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\"", "= dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue # If we've", "stop time based on final redshift. z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels", "self.subcycle() if not redo: # Changing attribute! A little scary, but we must", "{'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once we hit the", "if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1", "1. 
- xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n']", "else: self.history = {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm)", "z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up", "continue # Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe =", "t, z, data_igm, data_cgm, RC_igm, RC_cgm continue # If we've made it here,", "self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm = data_igm = None data_igm =", "self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return", "will know them grid_key = key.replace('%s_' % zone, '') if key in self.kwargs:", "to trick our generators a bit # \"undo\" this time-step t -= dt_pre", "i_trunc += 1 self.all_t = [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm", "\\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self):", "light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc +=", "igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1, data_igm = self.gen_igm.next() # Pass", "-= dt_pre z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided", "= inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i] if 2 not in", "Initialize (up to two) GasParcels. \"\"\" # Reset stop time based on final", "Loop over redshift and derive things for the IGM for i, red in", "MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs):", "np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase intergalactic medium. Returns ------- Tuple", "= False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm =", "from start to finish. Returns ------- Nothing: sets `history` attribute. \"\"\" self._insert_inits() pb", "self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1, data_igm =", "not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done", "1e50 RC_igm = data_igm = None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done", "\"\"\" Generator for a two-phase intergalactic medium. 
Returns ------- Tuple containing the current", "= data_cgm = None # Must update timesteps in unison dt_pre = dt", "Feb 16 12:46:28 MST 2015 Description: \"\"\" import numpy as np from .GasParcel", "order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all", "is a list with as many elements as there # are distinct populations", "= 1e50 done = True if not done: # CGM rate coefficients RC_cgm", "'_inits'): self._inits = inits = _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) >", "self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if", "self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 =", "= self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending", "= None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2']", "int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm", "else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return self.field.pops @property", "self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for a", "self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z)", "self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase", "zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1, data_igm = self.gen_igm.next()", "@property def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs)", "grid_key = key.replace('%s_' % zone, '') if key in self.kwargs: kw[grid_key] = self.kwargs[key]", "inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti,", "def _insert_inits(self): \"\"\" Prepend provided initial conditions to the data storage lists. \"\"\"", "kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm)", "data_cgm, RC_igm, RC_cgm continue # If we've made it here, we need to", "if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if", "- xi snapshot['h_2'] = xi # Add helium, assuming xHeII = xHII, and", "for i, red in enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys(): if", "def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default, this is a", "# Reset stop time based on final redshift. z = self.pf['initial_redshift'] zf =", "to finish. Returns ------- Nothing: sets `history` attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf,", "snapshot. 
\"\"\" t = 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep']", "self.history = {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) #", "[tmp.copy() for i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\", "Sort everything by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history", "= GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid))", "self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if self.pf['include_igm']: self.history_igm", "self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc])", "self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return #", "of Colorado at Boulder Created on: Mon Feb 16 12:46:28 MST 2015 Description:", "_load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs", "[], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not", "MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium", "= \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop over redshift and", "in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y)", "data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while", "t = 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf =", "parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt", "= xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] = \\ self.parcel_igm.grid.particle_density(snapshot.copy(), red)", "def zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return", "[optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if", "'igm_initial_ionization': [1. - xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self): if", "self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm = None # Must update", "must be in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi,", "= True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now,", "two steps. # Converged to desired tolerance? #self. 
def _stop_criteria_met(self): pass def run(self):", "= self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up to two)", "for i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i])", "= self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return", "# Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i]", "zone, '') if key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key]", "dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre", "inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not", "in time for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) #", "Recall that self.field.tau is a list with as many elements as there #", "MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default, this is", "def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def", "if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z']", "done = True if not done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z,", "tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf =", "self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything", "things for the IGM for i, red in enumerate(self.all_z): snapshot = {} for", "redo: # Changing attribute! A little scary, but we must make sure #", "(1) Re-compute the IGM optical depth. 
(2) \"\"\" return False # Check IGM", "if key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone", "inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe", "if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done = True", "len(self.all_z) # Don't mess with the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data", "RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if", "squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm =", "z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm = [] self.all_z", "Tk_inits, 'xe': xe_inits} # Stop pre-pending once we hit the first light redshift", "- xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self): if not hasattr(self,", "xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, #", "(much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))]", "t -= dt_pre z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend", "self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy())", "and an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively. To perform", "off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm =", "= list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) #", "1e50 done = True if not done: # CGM rate coefficients RC_cgm =", "out the ones for this zone for key in _mpm_defs: if key[0:4] !=", "<= self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm = [] self.all_z =", "at Boulder Created on: Mon Feb 16 12:46:28 MST 2015 Description: \"\"\" import", "an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively. To perform a", "= \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history = {} if", "if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must be in ascending", "new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. 
- xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new)", "np from .GasParcel import GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData import", "\\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return", "def inits(self): if not hasattr(self, '_inits'): self._inits = inits = _load_inits() zi =", "consisting of a \"bulk IGM\" grid patch and an \"HII regions\" grid patch,", "= self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for i, cgm_data in", "t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z)", "parcel t1, dt1, data_igm = self.gen_igm.next() # Pass rate coefficients off to the", "self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop over redshift", "timesteps in unison dt_pre = dt * 1. dt = min(dt1, dt2) dt", "self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while z >", "------- Nothing: sets `history` attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() #", "\"\"\" Prepend provided initial conditions to the data storage lists. \"\"\" if not", "self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue # If", "Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe':", "def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from start to finish. Returns", "if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field", "desired tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from start", "@property def pops(self): return self.field.pops @property def grid(self): return self.field.grid @property def parcels(self):", "self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\", "= min(xe, 1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1.", "not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must be in ascending order!')", "to ensure convergence. redo = self.subcycle() if not redo: # Changing attribute! A", "RC_igm, RC_cgm continue # If we've made it here, we need to trick", "time-step t -= dt_pre z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\"", "in unison dt_pre = dt * 1. dt = min(dt1, dt2) dt =", "a single snapshot. 
\"\"\" t = 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units']", "we've made it here, we need to trick our generators a bit #", "is a two-zone model, consisting of a \"bulk IGM\" grid patch and an", "if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients", "from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters", "(2) \"\"\" return False # Check IGM ionization state between last two steps.", "prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm =", "tau def subcycle(self): \"\"\" See if we need to re-do the previous timestep.", "ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if", "Changing attribute! A little scary, but we must make sure # these parcels", "enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']:", "'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. - xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property", "not self.pf['include_%s' % zone]: continue kw = self.pf.copy() # Loop over defaults, pull", "return self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property", "\"\"\" Initialize a MultiPhaseMedium object. By default, this is a two-zone model, consisting", "= self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT =", "[] for i in range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\" See", "\\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if", "# 'igm_initial_ionization': [1. - xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self):", "1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti,", "# The (potential) generators need this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']:", "t += dt z -= dt / dtdz # The (potential) generators need", "respectively. To perform a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if", "self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the CGM (much) if", "parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self):", "def step(self): \"\"\" Generator for a two-phase intergalactic medium. 
Returns ------- Tuple containing", "i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i])", "= data_cgm.copy() # If we're computing the IGM optical depth dynamically, we may", "self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def", "numpy as np from .GasParcel import GasParcel from ..util import ParameterFile, ProgressBar from", "_sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm", "MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return", "[] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm,", "/ dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions to the data", "self._inits = inits = _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0):", "parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if not", "return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims,", "self.gen_igm.next() # Pass rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else:", "list with as many elements as there # are distinct populations tau =", "parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data,", "2015 Description: \"\"\" import numpy as np from .GasParcel import GasParcel from ..util", "= 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift']", "zone in ['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue kw = self.pf.copy()", "dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions to the", "import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self,", "'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self):", "self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self):", "hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm", "self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) 
self._parcels[-1].pf['stop_time'] = self.tf", "\"re-do\" this step to ensure convergence. redo = self.subcycle() if not redo: #", "hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs)", "self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm", "hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time']", "# Loop over defaults, pull out the ones for this zone for key", "in ['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue kw = self.pf.copy() #", "= self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']:", "Have to rename variables so Grid class will know them grid_key = key.replace('%s_'", "if not hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property", "pops(self): return self.field.pops @property def grid(self): return self.field.grid @property def parcels(self): if not", "> 0): raise ValueError('Redshifts in ICs must be in ascending order!') Ti =", "prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t']", "off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2,", "storage lists. \"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [],", "IGM for i, red in enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys():", "descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1]", "return self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return", "them grid_key = key.replace('%s_' % zone, '') if key in self.kwargs: kw[grid_key] =", "#else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. -", "grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def", "else: dt1 = 1e50 RC_igm = data_igm = None data_igm = {'h_1': 1.0}", "* self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']:", "= {'igm_initial_temperature': Ti, 'initial_ionization': [1. 
- xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else:", "= \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_',", "# Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -=", "# #self.kwargs.update(new_pars) return self._inits @property def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']:", "in _mpm_defs: if key[0:4] != '%s_' % zone: continue # Have to rename", "self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop over redshift and derive things", "object. By default, this is a two-zone model, consisting of a \"bulk IGM\"", "/ self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\"", "> self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done = True if not", "neutral fraction xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe = min(xe,", "from .GasParcel import GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history,", "= [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)]", "not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel", "set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] = True", "enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key]", "\"\"\" # Reset stop time based on final redshift. z = self.pf['initial_redshift'] zf", "hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self,", "not hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def", "if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z,", "= self.subcycle() if not redo: # Changing attribute! A little scary, but we", "if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0]", "t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm =", "kw = self.pf.copy() # Loop over defaults, pull out the ones for this", "for this zone for key in _mpm_defs: if key[0:4] != '%s_' % zone:", "depth as simulation runs. 
\"\"\" # Recall that self.field.tau is a list with", "use=self.pf['progress_bar']) pb.start() # Evolve in time for t, z, data_igm, data_cgm, RC_igm, RC_cgm", "GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid))", "grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if not", "a two-zone model, consisting of a \"bulk IGM\" grid patch and an \"HII", "= self.pf.copy() # Loop over defaults, pull out the ones for this zone", "= self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy()", "def update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation runs. \"\"\" # Recall", "data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm = None #", "[] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm", "# new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. - xi,", "timestep. This mean: (1) Re-compute the IGM optical depth. (2) \"\"\" return False", "\"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property", "= data_igm = None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done = False", "parcel_igm.step() # Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm", "for the IGM and CGM data at a single snapshot. \"\"\" t =", "1. dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might", "the IGM optical depth dynamically, we may # need to \"re-do\" this step", "self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True)", "in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe'])", "self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]}", "dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions to the data storage", "<NAME> Affiliation: University of Colorado at Boulder Created on: Mon Feb 16 12:46:28", "\"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for t,", "Dynamically update optical depth as simulation runs. \"\"\" # Recall that self.field.tau is", "# 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. - xi, xi]} # #self.kwargs.update(new_pars) return self._inits", "Author: <NAME> Affiliation: University of Colorado at Boulder Created on: Mon Feb 16", "for zone in ['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue kw =", "this zone for key in _mpm_defs: if key[0:4] != '%s_' % zone: continue", "as simulation runs. \"\"\" # Recall that self.field.tau is a list with as", "Nothing: sets `history` attribute. 
\"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve", "# IGM rate coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not", "_rates_no_RT @property def tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf =", "inits(self): if not hasattr(self, '_inits'): self._inits = inits = _load_inits() zi = self.pf['initial_redshift']", "self._parcels = [] for zone in ['igm', 'cgm']: if not self.pf['include_%s' % zone]:", "two) GasParcels. \"\"\" # Reset stop time based on final redshift. z =", "if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']:", "\"undo\" this time-step t -= dt_pre z += dt_pre / dtdz self.update_optical_depth() def", "**RC_cgm) # Now, update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2", "if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2", "Don't mess with the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm =", "little scary, but we must make sure # these parcels are evolved in", "may # need to \"re-do\" this step to ensure convergence. redo = self.subcycle()", "steps. # Converged to desired tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\"", "# Pass rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now,", "xe = min(xe, 1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] =", "dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation runs.", "zone]: continue kw = self.pf.copy() # Loop over defaults, pull out the ones", "1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2']", "not hasattr(self, '_inits'): self._inits = inits = _load_inits() zi = self.pf['initial_redshift'] if not", "not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'):", "dt_pre z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial", "evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt", "Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i] if", "\"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively. To perform a single-zone", "self.pf['final_redshift'] self._parcels = [] for zone in ['igm', 'cgm']: if not self.pf['include_%s' %", "self.all_data_cgm = \\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [],", "# Now, update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 =", "Evolve in time! while z > zf: # Increment time / redshift dtdz", "`history` attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time", "we may # need to \"re-do\" this step to ensure convergence. redo =", "step to ensure convergence. redo = self.subcycle() if not redo: # Changing attribute!", "in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property def pf(self): if not", "+= dt z -= dt / dtdz # The (potential) generators need this", "convergence. 
redo = self.subcycle() if not redo: # Changing attribute! A little scary,", "coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2']", "squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] =", "pre-pending once we hit the first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift']))", "= [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the CGM (much) if self.pf['include_cgm']:", "(potential) generators need this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done =", "\"\"\" Dynamically update optical depth as simulation runs. \"\"\" # Recall that self.field.tau", "once we hit the first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if", "xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop", "'cgm']: if not self.pf['include_%s' % zone]: continue kw = self.pf.copy() # Loop over", "ones for this zone for key in _mpm_defs: if key[0:4] != '%s_' %", "= key.replace('%s_' % zone, '') if key in self.kwargs: kw[grid_key] = self.kwargs[key] else:", "we must make sure # these parcels are evolved in unison if self.pf['include_igm']:", "- xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False,", "self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy())", "coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry()", "parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next()", "Flip to descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits", "# Evolve in time! while z > zf: # Increment time / redshift", "not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done", "else: kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy() parcel_igm =", "to two) GasParcels. \"\"\" # Reset stop time based on final redshift. z", "perform a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not", "update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation runs. \"\"\" # Recall that", "if we need to re-do the previous timestep. 
This mean: (1) Re-compute the", "[] for zone in ['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue kw", "as np from .GasParcel import GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData", "if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm =", "if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if", "derive things for the IGM for i, red in enumerate(self.all_z): snapshot = {}", "variables so Grid class will know them grid_key = key.replace('%s_' % zone, '')", "+ int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel =", "as many elements as there # are distinct populations tau = [] for", "if 'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property def", "at a single snapshot. \"\"\" t = 0.0 z = self.pf['initial_redshift'] dt =", "grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers,", "= 1e50 done = True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True,", "\\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if", "Generator for a two-phase intergalactic medium. Returns ------- Tuple containing the current time,", "@property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\", "must make sure # these parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt", "self.all_t = [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] *", "Grid class will know them grid_key = key.replace('%s_' % zone, '') if key", "this is a two-zone model, consisting of a \"bulk IGM\" grid patch and", "1 self.all_t = [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)]", "zone for key in _mpm_defs: if key[0:4] != '%s_' % zone: continue #", "**RC_igm) else: dt1 = 1e50 RC_igm = data_igm = None data_igm = {'h_1':", "@property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else:", "self.pf['include_%s' % zone]: continue kw = self.pf.copy() # Loop over defaults, pull out", "# Recall that self.field.tau is a list with as many elements as there", "-= dt / dtdz # The (potential) generators need this self.field.update_redshift(z) # IGM", "_mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm =", "_initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\" # Reset stop time based", "'_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def", "False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. 
- xi, xi]} # #self.kwargs.update(new_pars) return", "with the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for", "scary, but we must make sure # these parcels are evolved in unison", "min(xe, 1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. -", "and \"cgm\", respectively. To perform a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``.", "self.pf['include_igm']: return # Loop over redshift and derive things for the IGM for", "self.all_z[i]) if not self.pf['include_igm']: return # Loop over redshift and derive things for", "_sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters()", "self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return self.field.pops @property def", "= self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits}", "IGM optical depth. (2) \"\"\" return False # Check IGM ionization state between", "# Hydrogen neutral fraction xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe", "self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} #", "self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if self.pf['include_igm']: self.history_igm = \\", "bit # \"undo\" this time-step t -= dt_pre z += dt_pre / dtdz", "and dictionaries for the IGM and CGM data at a single snapshot. \"\"\"", "'_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def", "done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass", "xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2']", "Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy()", "= self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']:", "pb.start() # Evolve in time for t, z, data_igm, data_cgm, RC_igm, RC_cgm in", "self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing the IGM optical depth dynamically,", "Ti, # 'igm_initial_ionization': [1. 
- xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def", "this time-step t -= dt_pre z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self):", "self.all_data_igm, self.all_data_cgm = \\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm =", "done = True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) #", "parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time']", "return self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if", "redshift. z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for zone in", "**self.kwargs) return self._field @property def pops(self): return self.field.pops @property def grid(self): return self.field.grid", "data_cgm = None # Must update timesteps in unison dt_pre = dt *", "_sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm)", "self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm):", "in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone == 'igm':", "runs. \"\"\" # Recall that self.field.tau is a list with as many elements", "z += dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions", "rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off", "zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones", "import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a", "attribute! 
A little scary, but we must make sure # these parcels are", "self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for t, z,", "we're computing the IGM optical depth dynamically, we may # need to \"re-do\"", "if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']:", "= self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf", "xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1.", "redshift, and dictionaries for the IGM and CGM data at a single snapshot.", "_rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)),", "z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits,", "cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) #", "\\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True)", "IGM and CGM data at a single snapshot. \"\"\" t = 0.0 z", "assuming xHeII = xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1.", "grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if not hasattr(self, '_tf'): z =", "self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi snapshot['h_2'] = xi # Add helium, assuming", "By default, this is a two-zone model, consisting of a \"bulk IGM\" grid", "everything by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history =", "self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not", "if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by", "self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1']", "Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if", "data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not", "key[0:4] != '%s_' % zone: continue # Have to rename variables so Grid", "these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If", "else: dt2 = 1e50 RC_cgm = data_cgm = None # Must update timesteps", "'_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'):", "i in range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\" See if we", "Run simulation from start to finish. 
Returns ------- Nothing: sets `history` attribute. \"\"\"", "zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return", "See if we need to re-do the previous timestep. This mean: (1) Re-compute", "key in _mpm_defs: if key[0:4] != '%s_' % zone: continue # Have to", "to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm = data_igm", "1.0) xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi", "snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] = \\ self.parcel_igm.grid.particle_density(snapshot.copy(),", "calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics']", "= xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi snapshot['h_2'] =", "class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default, this", "values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm =", "re-do the previous timestep. This mean: (1) Re-compute the IGM optical depth. (2)", "dt1 = 1e50 done = True if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm',", "made it here, we need to trick our generators a bit # \"undo\"", "__init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By default, this is a two-zone", "and derive things for the IGM for i, red in enumerate(self.all_z): snapshot =", "@property def tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift']", "z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read", "/ dtdz # The (potential) generators need this self.field.update_redshift(z) # IGM rate coefficients", "# Pass rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1", "if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. 
- xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10", "= \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def", "data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']:", "kwargs @property def pf(self): if not hasattr(self, '_pf'): inits = self.inits self._pf =", "initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() #", "# Have to rename variables so Grid class will know them grid_key =", "= self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1, data_igm", "redshift and derive things for the IGM for i, red in enumerate(self.all_z): snapshot", "return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm)", "# Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm,", "= GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data,", "key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i]", "= parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf /", "self.pf['max_timestep'] * self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if", "snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] = \\ self.parcel_igm.grid.particle_density(snapshot.copy(), red) self.all_t.append(0.0) self.all_data_igm.append(snapshot.copy())", "= {'h_1': 1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not None:", "of a \"bulk IGM\" grid patch and an \"HII regions\" grid patch, dubbed", "def subcycle(self): \"\"\" See if we need to re-do the previous timestep. This", "if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while z > zf:", "unison dt_pre = dt * 1. dt = min(dt1, dt2) dt = min(dt,", "self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional]", "= np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase intergalactic", "\\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel", "update timesteps in unison dt_pre = dt * 1. 
dt = min(dt1, dt2)", "= {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once we hit", "\"\"\" # Recall that self.field.tau is a list with as many elements as", "self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']:", "self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm", "self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order (in redshift) z_inits", "'_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self): if", "make sure # these parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt =", "return self._pf @property def inits(self): if not hasattr(self, '_inits'): self._inits = inits =", "xi # Add helium, assuming xHeII = xHII, and xHeIII << 1 if", "np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi,", "dt / dtdz # The (potential) generators need this self.field.update_redshift(z) # IGM rate", "------- Tuple containing the current time, redshift, and dictionaries for the IGM and", "range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\" See if we need to", "if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm = []", "if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] >", "ionization state between last two steps. # Converged to desired tolerance? #self. def", "z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once we hit the first", "coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm)", "self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while z > zf: #", "dt2 = 1e50 done = True if not done: # CGM rate coefficients", "Initialize a MultiPhaseMedium object. By default, this is a two-zone model, consisting of", "data_cgm = data_cgm_pre.copy() dt2 = 1e50 done = True if not done: #", "dt z -= dt / dtdz # The (potential) generators need this self.field.update_redshift(z)", "self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits} # Stop pre-pending once", "single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs:", "dubbed \"igm\" and \"cgm\", respectively. To perform a single-zone calculation, simply set ``include_cgm=False``", "= tau def subcycle(self): \"\"\" See if we need to re-do the previous", "self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\" #", "= data_cgm_pre.copy() dt2 = 1e50 done = True if not done: # CGM", "RC_igm = data_igm = None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done =", "xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] = \\ self.parcel_igm.grid.particle_density(snapshot.copy(), red) self.all_t.append(0.0)", "= [] for zone in ['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue", "self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation runs. 
\"\"\" #", "data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done = True if", "if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [], []", "= self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time! while z", "not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel", "% zone: continue # Have to rename variables so Grid class will know", "self._field @property def pops(self): return self.field.pops @property def grid(self): return self.field.grid @property def", "key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron", "or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs =", "``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs", "fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i] if 2", "inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10,", "containing the current time, redshift, and dictionaries for the IGM and CGM data", "# Evolve in time for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step():", "initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm", "done = False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm", "self.field.tau = tau def subcycle(self): \"\"\" See if we need to re-do the", "z -= dt / dtdz # The (potential) generators need this self.field.update_redshift(z) #", "np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if not hasattr(self, '_tf'):", "conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve", "dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm = None", "def pf(self): if not hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return", "i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n']", "for the IGM for i, red in enumerate(self.all_z): snapshot = {} for key", "Stop pre-pending once we hit the first light redshift i_trunc = np.argmin(np.abs(z_inits -", "self.history = self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm,", "xe_inits} # Stop pre-pending once we hit the first light redshift i_trunc =", "hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self):", "z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for zone in ['igm',", "@property def inits(self): if not hasattr(self, '_inits'): self._inits = inits = _load_inits() zi", "- self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t = [] self.all_data_igm", "True self.kwargs = kwargs @property def pf(self): if not hasattr(self, 
'_pf'): inits =", "on: Mon Feb 16 12:46:28 MST 2015 Description: \"\"\" import numpy as np", "{} for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue", "the CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i", "Returns ------- Tuple containing the current time, redshift, and dictionaries for the IGM", "start to finish. Returns ------- Nothing: sets `history` attribute. \"\"\" self._insert_inits() pb =", "def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else:", "def pops(self): return self.field.pops @property def grid(self): return self.field.grid @property def parcels(self): if", "* 1. dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) #", "key.replace('%s_' % zone, '') if key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key]", "self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for i,", "to descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits =", "mean: (1) Re-compute the IGM optical depth. (2) \"\"\" return False # Check", "need to trick our generators a bit # \"undo\" this time-step t -=", "'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial", "return # Flip to descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits =", "/ self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\", "= self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for zone in ['igm', 'cgm']:", "self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units']", "pb.finish() # Sort everything by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_',", "# Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']:", "self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid):", "return _rates_no_RT @property def tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf", "the previous timestep. This mean: (1) Re-compute the IGM optical depth. (2) \"\"\"", "time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt /", "self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done = True if not done:", "and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2'] =", "Reset stop time based on final redshift. 
z = self.pf['initial_redshift'] zf = self.pf['final_redshift']", "self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up to", "return # Loop over redshift and derive things for the IGM for i,", "= data_igm_pre.copy() dt1 = 1e50 done = True if not done: RC_igm =", "the IGM for i, red in enumerate(self.all_z): snapshot = {} for key in", "self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return", "data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done = True if", "dictionaries for the IGM and CGM data at a single snapshot. \"\"\" t", "RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1,", "Mon Feb 16 12:46:28 MST 2015 Description: \"\"\" import numpy as np from", "ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not hasattr(self, '_inits'): self._inits = inits", "this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2']", "data_igm = self.gen_igm.next() # Pass rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm,", "to \"re-do\" this step to ensure convergence. redo = self.subcycle() if not redo:", "elements as there # are distinct populations tau = [] for i in", "return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm =", "0): raise ValueError('Redshifts in ICs must be in ascending order!') Ti = np.interp(zi,", "<< 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2'] = xi snapshot['he_3']", "'%s_' % zone: continue # Have to rename variables so Grid class will", "self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm,", "self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50 done = True if not done:", "Must update timesteps in unison dt_pre = dt * 1. dt = min(dt1,", "[], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del", "coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to", "not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not", "None # Must update timesteps in unison dt_pre = dt * 1. dt", "self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e']", "if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done = True", "for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save data", "previous timestep. This mean: (1) Re-compute the IGM optical depth. 
(2) \"\"\" return", "z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf", "helium, assuming xHeII = xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] =", "first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc", "inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization':", "False # Check IGM ionization state between last two steps. # Converged to", "if not done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM", "= _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in", "if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def", "12:46:28 MST 2015 Description: \"\"\" import numpy as np from .GasParcel import GasParcel", "= MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object. By", "IGM\" grid patch and an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\",", "= \\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], []", "CGM (much) if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in", "self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history =", "_mpm_defs: if key[0:4] != '%s_' % zone: continue # Have to rename variables", "self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy()", "a single-zone calculation, simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in", "self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step()", "= self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the CGM", "data_cgm.copy() # If we're computing the IGM optical depth dynamically, we may #", "if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if not hasattr(self,", "parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm = data_igm = None data_igm", "(in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all =", "pull out the ones for this zone for key in _mpm_defs: if key[0:4]", "self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip", "xHeII = xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. 
-", "self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort", "self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm", "a bit # \"undo\" this time-step t -= dt_pre z += dt_pre /", "#self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from start to finish.", "finish. Returns ------- Nothing: sets `history` attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar'])", "self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing", "if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e'] =", "# \"undo\" this time-step t -= dt_pre z += dt_pre / dtdz self.update_optical_depth()", "dynamically, we may # need to \"re-do\" this step to ensure convergence. redo", "need to \"re-do\" this step to ensure convergence. redo = self.subcycle() if not", "None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is", "CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients", "MultiPhaseMedium.py Author: <NAME> Affiliation: University of Colorado at Boulder Created on: Mon Feb", "= np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t =", "regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively. To perform a single-zone calculation,", "zone == 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() #", "many elements as there # are distinct populations tau = [] for i", "Save rate coefficients [optional] if self.pf['save_rate_coefficients']: if self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_',", "fraction xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0)", "parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm = data_cgm", "return self.field.pops @property def grid(self): return self.field.grid @property def parcels(self): if not hasattr(self,", "@property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self):", "This mean: (1) Re-compute the IGM optical depth. (2) \"\"\" return False #", "update optical depth as simulation runs. \"\"\" # Recall that self.field.tau is a", "i, red in enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys(): if key", "data_cgm_pre = data_cgm.copy() # If we're computing the IGM optical depth dynamically, we", "= _mpm_defs[key] if zone == 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm", "single snapshot. \"\"\" t = 0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] *", "to re-do the previous timestep. 
This mean: (1) Re-compute the IGM optical depth.", "= data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing the IGM", "= inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi =", "return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical", "pass def run(self): \"\"\" Run simulation from start to finish. Returns ------- Nothing:", "self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth", "in enumerate(self.all_z): snapshot = {} for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys():", "+= dt_pre / dtdz self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions to", "time for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save", "\"bulk IGM\" grid patch and an \"HII regions\" grid patch, dubbed \"igm\" and", "[1. - xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self): if not", "if not redo: # Changing attribute! A little scary, but we must make", "data_igm = None data_igm = {'h_1': 1.0} if self.pf['include_cgm']: done = False if", "1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization':", "data storage lists. \"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\", "if self.pf['include_cgm']: tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for", "Check IGM ionization state between last two steps. # Converged to desired tolerance?", "= kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\", "'_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return", "we need to re-do the previous timestep. This mean: (1) Re-compute the IGM", "{'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT", "1. - xi snapshot['h_2'] = xi # Add helium, assuming xHeII = xHII,", "a two-phase intergalactic medium. Returns ------- Tuple containing the current time, redshift, and", "self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update", "#if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi,", "'_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self): if", "- xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red) snapshot['n'] =", "= np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new =", "self.parcel_cgm.grid.data.copy() # Evolve in time! 
while z > zf: # Increment time /", "None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done =", "xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, #", "in time! while z > zf: # Increment time / redshift dtdz =", "grid patch, dubbed \"igm\" and \"cgm\", respectively. To perform a single-zone calculation, simply", "self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not hasattr(self, '_inits'):", "self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the CGM parcel", "self.field.tau is a list with as many elements as there # are distinct", "dt2 = 1e50 RC_cgm = data_cgm = None # Must update timesteps in", "= [tmp.copy() for i in range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] =", "self.update_optical_depth() def _insert_inits(self): \"\"\" Prepend provided initial conditions to the data storage lists.", "as there # are distinct populations tau = [] for i in range(self.field.Npops):", "current time, redshift, and dictionaries for the IGM and CGM data at a", "time, redshift, and dictionaries for the IGM and CGM data at a single", "..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground", "z, data_igm, data_cgm, RC_igm, RC_cgm continue # If we've made it here, we", "= 1e50 RC_cgm = data_cgm = None # Must update timesteps in unison", "* self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm =", "Loop over defaults, pull out the ones for this zone for key in", "order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']:", "it here, we need to trick our generators a bit # \"undo\" this", "Now, update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50", "..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs", "dt1, data_igm = self.gen_igm.next() # Pass rate coefficients off to the IGM parcel", "# Check IGM ionization state between last two steps. # Converged to desired", "self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat':", "depth dynamically, we may # need to \"re-do\" this step to ensure convergence.", "self.tf / self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm'])", "for i in range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\" See if", "_stop_criteria_met(self): pass def run(self): \"\"\" Run simulation from start to finish. 
Returns -------", "cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if", "if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're", "self.kwargs.update(new) #else: # new_pars = {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1.", "= self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_',", "min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy()", "ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues", "not self.pf['include_igm']: return # Loop over redshift and derive things for the IGM", "!= '%s_' % zone: continue # Have to rename variables so Grid class", "Pass rate coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 =", "# Add helium, assuming xHeII = xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']:", "time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else:", "del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order (in redshift) z_inits =", "two-phase intergalactic medium. Returns ------- Tuple containing the current time, redshift, and dictionaries", "'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if not hasattr(self,", "\\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']:", "/ (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi snapshot['h_2'] = xi #", "# CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate", "% zone, '') if key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] =", "The (potential) generators need this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done", "by time if self.pf['include_igm']: self.history_igm = \\ _sort_history(self.all_data_igm, prefix='igm_', squeeze=True) self.history = self.history_igm.copy()", "snapshot['he_1'] = 1. - xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] =", "_mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize a MultiPhaseMedium object.", "self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self):", "[], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending", "continue # Have to rename variables so Grid class will know them grid_key", "coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel", "Returns ------- Nothing: sets `history` attribute. 
\"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start()", "self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho']", "@property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm = self.parcels[0] return self._parcel_igm @property", "Evolve in time for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t)", "= self.inits['z'][-1::-1] Tk_inits = self.inits['Tk'][-1::-1] xe_inits = self.inits['xe'][-1::-1] inits_all = {'z': z_inits, 'Tk':", "+= 1 self.all_t = [] self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm =", "= min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these...", "default, this is a two-zone model, consisting of a \"bulk IGM\" grid patch", "= 1. - xi snapshot['he_2'] = xi snapshot['he_3'] = 1e-10 snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red)", "Hydrogen neutral fraction xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe =", "a list with as many elements as there # are distinct populations tau", "range(len(self.all_z))] for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\", "'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars =", "def grid(self): return self.field.grid @property def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return", "data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing the", "np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property def tf(self): if", "# Converged to desired tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run", "= dt * 1. dt = min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] *", "are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt =", "self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue #", "self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)", "our generators a bit # \"undo\" this time-step t -= dt_pre z +=", "data_igm, data_cgm, RC_igm, RC_cgm continue # If we've made it here, we need", "= False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm =", "sets `history` attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in", "in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not", "# Must update timesteps in unison dt_pre = dt * 1. 
dt =", "xe = inits_all['xe'][i] if 2 not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi", "self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must be in", "= self.pf['final_redshift'] self._parcels = [] for zone in ['igm', 'cgm']: if not self.pf['include_%s'", "def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm", "done = False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm", "Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy()", "not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return", "RC_cgm continue # If we've made it here, we need to trick our", "} return _rates_no_RT @property def tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift']", "while z > zf: # Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t", "= min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre =", "the IGM and CGM data at a single snapshot. \"\"\" t = 0.0", "time! while z > zf: # Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z)", "done: RC_igm = self.field.update_rate_coefficients(z, zone='igm', return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1,", "ICs must be in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi =", "parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] =", "patch and an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", respectively. To", "hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property", "self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if", "``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs", "self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for zone in ['igm', 'cgm']: if", "IGM optical depth dynamically, we may # need to \"re-do\" this step to", "self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ +", "but we must make sure # these parcels are evolved in unison if", "= [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with", "Might need these... 
if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy()", "data_cgm_pre.copy() dt2 = 1e50 done = True if not done: # CGM rate", "= [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to", "zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm,", "generators need this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done = False", "[] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order", "> self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy() dt1 = 1e50 done = True if not", "redo = self.subcycle() if not redo: # Changing attribute! A little scary, but", "Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt", "def parcels(self): if not hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if", "if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing the IGM optical depth", "final redshift. z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._parcels = [] for zone", "True if not done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True,", "return self._tf def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels. \"\"\" # Reset", "hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property def default_parcel(self):", "dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] # Read initial conditions if", "for i, cgm_data in enumerate(self.all_data_cgm): self.all_data_cgm[i]['rho'] = \\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data,", "self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, **self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property", "data_igm, data_cgm, RC_igm, RC_cgm in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if", "in self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']:", "a \"bulk IGM\" grid patch and an \"HII regions\" grid patch, dubbed \"igm\"", "= np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase intergalactic medium. Returns -------", "np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts in ICs must be in ascending order!') Ti", "'') if key in self.kwargs: kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if", "self.all_data_cgm return # Flip to descending order (in redshift) z_inits = self.inits['z'][-1::-1] Tk_inits", "self.parcels[0] return self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm", "self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize", "subcycle(self): \"\"\" See if we need to re-do the previous timestep. 
This mean:", "class will know them grid_key = key.replace('%s_' % zone, '') if key in", "@property def zones(self): if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm'])", "if not self.pf['include_%s' % zone]: continue kw = self.pf.copy() # Loop over defaults,", "not redo: # Changing attribute! A little scary, but we must make sure", "rename variables so Grid class will know them grid_key = key.replace('%s_' % zone,", "\\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\"", "= None # Must update timesteps in unison dt_pre = dt * 1.", "Pass rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update", "not in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property def pf(self): if", "snapshot['h_2'] = xi # Add helium, assuming xHeII = xHII, and xHeIII <<", "self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR =", "IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm = data_igm = None", "in ICs must be in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk']) xi", "* len(self.all_z) # Don't mess with the CGM (much) if self.pf['include_cgm']: tmp =", "if not hasattr(self, '_zones'): self._zones = int(self.pf['include_igm']) \\ + int(self.pf['include_cgm']) return self._zones @property", "not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z)", "kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values for rate", "update CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm", "medium. Returns ------- Tuple containing the current time, redshift, and dictionaries for the", "inits = _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z']) > 0): raise ValueError('Redshifts", "self.all_data_igm = [] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm =", "np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature':", "..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\" Initialize", "**self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm =", "CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM parcel t2, dt2, data_cgm =", "attribute. \"\"\" self._insert_inits() pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for", "'xe': xe_inits} # Stop pre-pending once we hit the first light redshift i_trunc", "MultiPhaseMedium object. By default, this is a two-zone model, consisting of a \"bulk", "# Set initial values for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm =", "= 1. 
- xi snapshot['h_2'] = xi # Add helium, assuming xHeII =", "> zf: # Increment time / redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt", "IGM parcel t1, dt1, data_igm = self.gen_igm.next() # Pass rate coefficients off to", "know them grid_key = key.replace('%s_' % zone, '') if key in self.kwargs: kw[grid_key]", "self.pf['include_cgm']: done = False if self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']:", "return self._field @property def pops(self): return self.field.pops @property def grid(self): return self.field.grid @property", "'load_ics' not in kwargs: kwargs['load_ics'] = True self.kwargs = kwargs @property def pf(self):", "dt1 = 1e50 RC_igm = data_igm = None data_igm = {'h_1': 1.0} if", "(1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. - xi snapshot['h_2'] = xi # Add", "rate coefficients off to the CGM parcel self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm) # Now, update CGM", "lists. \"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [],", "0.0 z = self.pf['initial_redshift'] dt = self.pf['time_units'] * self.pf['initial_timestep'] zf = self.pf['final_redshift'] #", "snapshot['h_1'] = 1. - xi snapshot['h_2'] = xi # Add helium, assuming xHeII", "= ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for t, z, data_igm, data_cgm,", "\\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self):", "Colorado at Boulder Created on: Mon Feb 16 12:46:28 MST 2015 Description: \"\"\"", "defaults, pull out the ones for this zone for key in _mpm_defs: if", "if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf,", "i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1 self.all_t", "data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']:", "simulation runs. \"\"\" # Recall that self.field.tau is a list with as many", "else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion':", "optical depth. (2) \"\"\" return False # Check IGM ionization state between last", "= kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set initial values for", "parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm =", "is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 = 1e50", "data_igm.copy() if self.pf['include_cgm']: data_cgm_pre = data_cgm.copy() # If we're computing the IGM optical", "dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these... 
if self.pf['include_igm']:", "['igm', 'cgm']: if not self.pf['include_%s' % zone]: continue kw = self.pf.copy() # Loop", "self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in time!", "= self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf /", "inits['z'], inits['xe']) #if self.pf['include_He']: new = {'igm_initial_temperature': Ti, 'initial_ionization': [1. - xi, xi,", "if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order (in", "self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [], [] if self.pf['save_rate_coefficients']:", "self._inits @property def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field = MetaGalacticBackground(grid=self.parcel_igm.grid,", "snapshot = {} for key in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] =", "return self._inits @property def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field =", "Ti = np.interp(zi, inits['z'], inits['Tk']) xi = np.interp(zi, inits['z'], inits['xe']) #if self.pf['include_He']: new", "= MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return self.field.pops @property def grid(self):", "self.pf['include_igm']: self.rates_igm = \\ _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True) self.history.update(self.rates_igm) if self.pf['include_cgm']: self.rates_cgm = \\", "= self.inits self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not hasattr(self,", "self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self): if not hasattr(self, '_zones'): self._zones", "self._tf / self.pf['time_units'] return self._tf def _initialize_zones(self): \"\"\" Initialize (up to two) GasParcels.", "raise ValueError('Redshifts in ICs must be in ascending order!') Ti = np.interp(zi, inits['z'],", "xi, xi]} # #self.kwargs.update(new_pars) return self._inits @property def field(self): if not hasattr(self, '_field'):", "xi = xe / (1. + self.parcel_igm.grid.cosm.y) snapshot['h_1'] = 1. 
- xi snapshot['h_2']", "Affiliation: University of Colorado at Boulder Created on: Mon Feb 16 12:46:28 MST", "/ redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt / dtdz", "snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe = inits_all['xe'][i] if 2 not", "hasattr(self, '_inits'): self._inits = inits = _load_inits() zi = self.pf['initial_redshift'] if not np.all(np.diff(inits['z'])", "if self.pf['include_igm']: self._parcel_cgm = self.parcels[1] else: self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self,", "False if self.pf['stop_igm_h_2'] is not None: if data_igm['h_2'] > self.pf['stop_igm_h_2']: data_igm = data_igm_pre.copy()", "self._pf @property def inits(self): if not hasattr(self, '_inits'): self._inits = inits = _load_inits()", "= True if not done: # CGM rate coefficients RC_cgm = self.field.update_rate_coefficients(z, zone='cgm',", "in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i] #", "np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)), } return _rates_no_RT @property", "self.pf['stop_cgm_h_2'] is not None: if data_cgm['h_2'] > self.pf['stop_cgm_h_2']: data_cgm = data_cgm_pre.copy() dt2 =", "xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi snapshot['he_2'] = xi", "**self.kwargs) else: self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, **self.kwargs) return self._field @property def pops(self): return self.field.pops", "in self.parcel_igm.grid.data.keys(): if key in self.inits.keys(): snapshot[key] = inits_all[key][i] continue # Electron fraction", "return False # Check IGM ionization state between last two steps. # Converged", "self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess", "min(dt1, dt2) dt = min(dt, self.pf['max_timestep'] * self.pf['time_units']) # Might need these... if", "# Loop over redshift and derive things for the IGM for i, red", "self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish() # Sort everything by time if self.pf['include_igm']:", "the data storage lists. \"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm =", "conditions to the data storage lists. 
\"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm,", "dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt / dtdz # The", "self._parcel_cgm = self.parcels[0] return self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims,", "self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm) self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units'] @property def zones(self): if", "for rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm)", "squeeze=True) self.history = self.history_igm.copy() else: self.history = {} if self.pf['include_cgm']: self.history_cgm = \\", "tau = [] for i in range(self.field.Npops): pass self.field.tau = tau def subcycle(self):", "self.all_RCs_cgm, self.all_data_cgm return # Flip to descending order (in redshift) z_inits = self.inits['z'][-1::-1]", "redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']: i_trunc += 1", "Converged to desired tolerance? #self. def _stop_criteria_met(self): pass def run(self): \"\"\" Run simulation", "generators a bit # \"undo\" this time-step t -= dt_pre z += dt_pre", "if not hasattr(self, '_inits'): self._inits = inits = _load_inits() zi = self.pf['initial_redshift'] if", "# Changing attribute! A little scary, but we must make sure # these", "IGM ionization state between last two steps. # Converged to desired tolerance? #self.", "sure # these parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt", "[] self.all_z = list(z_inits[0:i_trunc]) self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] *", "if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return", "snapshot[key] = inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral", "@property def pf(self): if not hasattr(self, '_pf'): inits = self.inits self._pf = ParameterFile(**self.kwargs)", "are distinct populations tau = [] for i in range(self.field.Npops): pass self.field.tau =", "\"\"\" if not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [],", "if not self.pf['include_igm']: return # Loop over redshift and derive things for the", "MST 2015 Description: \"\"\" import numpy as np from .GasParcel import GasParcel from", "in range(self.field.Npops): pass self.field.tau = tau def subcycle(self): \"\"\" See if we need", "_sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\"", "yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue # If we've made it", "with as many elements as there # are distinct populations tau = []", "def tf(self): if not hasattr(self, '_tf'): z = self.pf['initial_redshift'] zf = self.pf['final_redshift'] self._tf", "if key[0:4] != '%s_' % zone: continue # Have to rename variables so", "self.step(): pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if 
self.pf['include_igm']: self.all_data_igm.append(data_igm.copy())", "1e50 RC_cgm = data_cgm = None # Must update timesteps in unison dt_pre", "pb = ProgressBar(self.tf, use=self.pf['progress_bar']) pb.start() # Evolve in time for t, z, data_igm,", "= ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not hasattr(self, '_inits'): self._inits =", "self._parcel_igm @property def parcel_cgm(self): if not hasattr(self, '_parcel_cgm'): if self.pf['include_igm']: self._parcel_cgm = self.parcels[1]", "run(self): \"\"\" Run simulation from start to finish. Returns ------- Nothing: sets `history`", "in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield", "the first light redshift i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) if z_inits[i_trunc] <= self.pf['initial_redshift']:", "University of Colorado at Boulder Created on: Mon Feb 16 12:46:28 MST 2015", "to rename variables so Grid class will know them grid_key = key.replace('%s_' %", "so Grid class will know them grid_key = key.replace('%s_' % zone, '') if", "GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True) parcel_cgm._set_chemistry() self.gen_cgm = parcel_cgm.step() parcel_cgm.chem.chemnet.monotonic_EoR = \\ self.pf['monotonic_EoR'] parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, **self.rates_no_RT(parcel_cgm.grid)) self._parcels.append(parcel_cgm)", "return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation runs. \"\"\"", "pass self.field.tau = tau def subcycle(self): \"\"\" See if we need to re-do", "simply set ``include_cgm=False`` or ``include_igm=False``. \"\"\" if 'load_ics' not in kwargs: kwargs['load_ics'] =", "need to re-do the previous timestep. This mean: (1) Re-compute the IGM optical", "[1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]} self.kwargs.update(new) #else: # new_pars = {'cosmological_ics':", "= \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def", "not in self.parcel_igm.grid.Z: xe = min(xe, 1.0) xi = xe / (1. 
+", "zf = self.pf['final_redshift'] # Read initial conditions if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if", "inits_all[key][i] continue # Electron fraction snapshot['e'] = inits_all['xe'][i] # Hydrogen neutral fraction xe", "def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)), 'k_ion2':", "hasattr(self, '_parcels'): self._initialize_zones() return self._parcels @property def parcel_igm(self): if not hasattr(self, '_parcel_igm'): self._parcel_igm", "rate coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not None: if", "need this self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done = False if", "kw[grid_key] = self.kwargs[key] else: kw[grid_key] = _mpm_defs[key] if zone == 'igm': self.kw_igm =", "prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator", "ValueError('Redshifts in ICs must be in ascending order!') Ti = np.interp(zi, inits['z'], inits['Tk'])", "self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\ else self.parcel_cgm return self._default_parcel @property def dynamic_tau(self):", "and CGM data at a single snapshot. \"\"\" t = 0.0 z =", "= xHII, and xHeIII << 1 if self.parcel_igm.pf['include_He']: snapshot['he_1'] = 1. - xi", "Created on: Mon Feb 16 12:46:28 MST 2015 Description: \"\"\" import numpy as", "#self.kwargs.update(new_pars) return self._inits @property def field(self): if not hasattr(self, '_field'): if self.pf['include_igm']: self._field", "from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object): def __init__(self, **kwargs): \"\"\"", "self.pf['include_cgm']: self.rates_cgm = \\ _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True) self.history.update(self.rates_cgm) self.history['t'] = np.array(self.all_t) self.history['z'] =", "self.pf['time_units']) # Might need these... if self.pf['include_igm']: data_igm_pre = data_igm.copy() if self.pf['include_cgm']: data_cgm_pre", "Re-compute the IGM optical depth. 
(2) \"\"\" return False # Check IGM ionization", "pb.update(t) # Save data self.all_z.append(z) self.all_t.append(t) if self.pf['include_cgm']: self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if", "\\ self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i]) self.all_data_cgm[i]['n'] = \\ self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i]) if not self.pf['include_igm']: return # Loop", "= \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate coefficients [optional] if self.pf['save_rate_coefficients']:", "# these parcels are evolved in unison if self.pf['include_igm']: self.parcel_igm.dt = dt if", "self._pf = ParameterFile(**self.kwargs) return self._pf @property def inits(self): if not hasattr(self, '_inits'): self._inits", "RC_cgm = data_cgm = None # Must update timesteps in unison dt_pre =", "RC_cgm = self.field.update_rate_coefficients(z, zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1']) # Pass rate coefficients off to the", "not self.pf['load_ics']: self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [], [] if", "coefficients off to the IGM parcel self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm) else: dt1 = 1e50 RC_igm", ".GasParcel import GasParcel from ..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits", "populations tau = [] for i in range(self.field.Npops): pass self.field.tau = tau def", "# Stop pre-pending once we hit the first light redshift i_trunc = np.argmin(np.abs(z_inits", "tmp = self.parcel_cgm.grid.data self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))] for i, cgm_data", "from .MetaGalacticBackground import MetaGalacticBackground from ..util.SetDefaultParameterValues import MultiPhaseParameters _mpm_defs = MultiPhaseParameters() class MultiPhaseMedium(object):", "def dynamic_tau(self): return self.pf['tau_dynamic'] def update_optical_depth(self): \"\"\" Dynamically update optical depth as simulation", "return_rc=True, igm_h_1=data_igm['h_1']) # Now, update IGM parcel t1, dt1, data_igm = self.gen_igm.next() #", "self._parcel_cgm def rates_no_RT(self, grid): _rates_no_RT = \\ {'k_ion': np.zeros((grid.dims, grid.N_absorbers)), 'k_heat': np.zeros((grid.dims, grid.N_absorbers)),", "== 'igm': self.kw_igm = kw.copy() parcel_igm = GasParcel(**self.kw_igm) self.gen_igm = parcel_igm.step() # Set", "IGM rate coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is not None:", "if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm = [], [] if not self.pf['include_cgm']: del self.all_RCs_cgm, self.all_data_cgm", "CGM parcel t2, dt2, data_cgm = self.gen_cgm.next() else: dt2 = 1e50 RC_cgm =", "Boulder Created on: Mon Feb 16 12:46:28 MST 2015 Description: \"\"\" import numpy", "self.field.update_redshift(z) # IGM rate coefficients if self.pf['include_igm']: done = False if self.pf['stop_igm_h_2'] is", "= {'cosmological_ics': False, # 'igm_initial_temperature': Ti, # 'igm_initial_ionization': [1. 
- xi, xi]} #", "self._zones @property def default_parcel(self): if not hasattr(self, '_default_parcel'): self._default_parcel = self.parcel_igm if self.pf['include_igm']", "self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm,", "self.all_data_cgm.append(data_cgm.copy()) if self.pf['include_igm']: self.all_data_igm.append(data_igm.copy()) if self.pf['save_rate_coefficients']: if self.pf['include_cgm']: self.all_RCs_cgm.append(RC_cgm.copy()) if self.pf['include_igm']: self.all_RCs_igm.append(RC_igm.copy()) pb.finish()", "rate coefficients parcel_igm.update_rate_coefficients(parcel_igm.grid.data, **self.rates_no_RT(parcel_igm.grid)) self._parcels.append(parcel_igm) else: self.kw_cgm = kw.copy() parcel_cgm = GasParcel(**self.kw_cgm) parcel_cgm.grid.set_recombination_rate(True)", "redshift dtdz = self.default_parcel.grid.cosm.dtdz(z) t += dt z -= dt / dtdz #", "model, consisting of a \"bulk IGM\" grid patch and an \"HII regions\" grid", "A little scary, but we must make sure # these parcels are evolved", "self.all_z, self.all_data_igm, self.all_data_cgm = \\ [], [], [], [] if self.pf['save_rate_coefficients']: self.all_RCs_igm, self.all_RCs_cgm", "{} if self.pf['include_cgm']: self.history_cgm = \\ _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) self.history.update(self.history_cgm) # Save rate", "if self.pf['include_cgm']: self.parcel_cgm.dt = dt yield t, z, data_igm, data_cgm, RC_igm, RC_cgm continue", "if self.pf['include_igm']: data_igm = self.parcel_igm.grid.data.copy() if self.pf['include_cgm']: data_cgm = self.parcel_cgm.grid.data.copy() # Evolve in", "np.array(self.all_t) self.history['z'] = np.array(self.all_z) def step(self): \"\"\" Generator for a two-phase intergalactic medium.", "len(self.all_z) self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z) # Don't mess with the CGM (much)", "self.pf['final_redshift'] self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z) self.pf['stop_time'] = self._tf / self.pf['time_units'] return self._tf def", "self.pf.copy() # Loop over defaults, pull out the ones for this zone for", "that self.field.tau is a list with as many elements as there # are", "from ..util import ParameterFile, ProgressBar from ..util.ReadData import _sort_history, _load_inits from .MetaGalacticBackground import" ]
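A minimal driver sketch for the class above. This is hypothetical: the parameter names mirror the self.pf lookups in the code, the values are illustrative, and the import is assumed to follow the package layout implied by the relative imports.

# Hypothetical driver: evolve both zones and inspect the sorted history.
# include_igm, include_cgm, and save_rate_coefficients are parameter names
# taken from the self.pf lookups above; values are illustrative.
sim = MultiPhaseMedium(include_igm=True, include_cgm=True,
    save_rate_coefficients=True)
sim.run()

# run() populates `history`: prefixed quantities ('igm_*', 'cgm_*') plus
# the time and redshift arrays appended at the end of run().
print(sim.history['z'].shape, sim.history['t'].shape)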
[ "= [ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField( model_name='resource', name='post_id', field=models.IntegerField(default=0), ),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ]", "import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('web',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations = [", "Generated by Django 1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals from django.db", "-*- # Generated by Django 1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals", "[ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField( model_name='resource', name='post_id', field=models.IntegerField(default=0), ), ]", "# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-08-08 15:00", "dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField( model_name='resource', name='post_id', field=models.IntegerField(default=0),", "Django 1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals from django.db import migrations,", "models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField(", "utf-8 -*- # Generated by Django 1.11.15 on 2018-08-08 15:00 from __future__ import", "class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField( model_name='resource',", "by Django 1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals from django.db import", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations =", "unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'),", "2018-08-08 15:00 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):", "<filename>web/migrations/0042_auto_20180808_1500.py # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-08-08", "-*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-08-08 15:00 from", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations", "from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "# Generated by Django 1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals from", "coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-08-08 15:00 from __future__", "Migration(migrations.Migration): dependencies = [ ('web', '0041_auto_20180808_1450'), ] operations = [ migrations.AlterField( model_name='resource', name='post_id',", "__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "15:00 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies", "1.11.15 on 2018-08-08 15:00 from __future__ import unicode_literals from django.db import migrations, models", "on 2018-08-08 15:00 from __future__ import unicode_literals from django.db import migrations, models class" ]
[ "\"\"\" # can use arbitrary dataframe since columns will be the same. tag_groups", "one dataframe. \"\"\" def __init__( self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path:", "def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in the unified case, full", "str, network_log_path: str, ): \"\"\" Class constructor. Args: save_folder: path to folder for", "\"\"\" Class constructor. Args: save_folder: path to folder for saving plots. num_steps: total", "network etc.) to dataframes with results. save_path: path to save the plot. \"\"\"", "be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names =", "{}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection = {", "both). Args: data: mapping from type of results (ode, network etc.) to dataframes", "constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str,", "plotting generalisation errors, overlaps etc. For case when logging is done in 'unified'", "\"\"\"Class for plotting generalisation errors, overlaps etc. For case when logging is done", "total number of steps in the training run (used for scaling axes). log_overlaps:", "Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots for a set of", "+ 1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type:", "errors, overlaps etc. For case when logging is done in 'unified' fashion i.e.", "not to plot ode data. log_network: whether ot not to plot network data.", "range(num_rows): for col in range(num_columns): graph_index = (row) * num_columns + col if", "self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF),", "str, ) -> None: \"\"\"Make plots for a set of results (e.g. ode", "whether ot not to plot ode data. log_network: whether ot not to plot", "...] group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns", "< num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys =", "use arbitrary dataframe since columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) #", "ode data. log_network: whether ot not to plot network data. \"\"\" self._ode_logger_path =", "number of steps in the training run (used for scaling axes). log_overlaps: whether", "self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None:", "data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots for", "loaded into memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0)", "a set of results (e.g. ode or network or both). Args: data: mapping", "from typing import Union import pandas as pd from cata import constants from", "save_path: path to save the plot. \"\"\" # can use arbitrary dataframe since", "run (used for scaling axes). 
log_overlaps: whether or not to plot overlaps (or", "keys = group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for key in keys}", "as pd from cata import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class", "typing import Union import pandas as pd from cata import constants from cata.plotters", "...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...]", "{key: data[data_type][key].dropna() for key in keys} for data_type in data.keys() } fig =", "ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor. Args: save_folder: path to folder", "save_path: str, ) -> None: \"\"\"Make plots for a set of results (e.g.", "overlaps etc. For case when logging is done in 'unified' fashion i.e. all", "= list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1]", "all into one dataframe. \"\"\" def __init__( self, save_folder: str, num_steps: int, log_overlaps:", "from cata import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting", "from typing import Dict from typing import Optional from typing import Union import", "): \"\"\" Class constructor. Args: save_folder: path to folder for saving plots. num_steps:", "or both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), )", "col in range(num_columns): graph_index = (row) * num_columns + col if graph_index <", "of results (e.g. ode or network or both). Args: data: mapping from type", "self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is", "with results. save_path: path to save the plot. \"\"\" # can use arbitrary", "\"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def", "constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps", "None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None and", "plots for a set of results (e.g. ode or network or both). Args:", "}, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path:", "overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows =", "self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method", "1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type: {key:", "None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for plotting ode", "class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc. 
For case when logging", "num_steps: total number of steps in the training run (used for scaling axes).", ") -> None: \"\"\"Make plots for a set of results (e.g. ode or", "or network or both). Args: data: mapping from type of results (ode, network", "if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration", "is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), )", "fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows):", "pandas as pd from cata import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter):", "tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) # e.g.", "\"\"\"Setup data from relevant dataframes. Here, in the unified case, full dataset is", "= self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row", "save the plot. \"\"\" # can use arbitrary dataframe since columns will be", "log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in the unified", "generalisation errors, overlaps etc. For case when logging is done in 'unified' fashion", "for scaling axes). log_overlaps: whether or not to plot overlaps (or just errors).", "if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path", "same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) #", "self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from", "or both). Args: data: mapping from type of results (ode, network etc.) to", "-> None: \"\"\"Make plots for a set of results (e.g. ode or network", "to folder for saving plots. num_steps: total number of steps in the training", "is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not", "_setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in the unified case, full dataset", "None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path)", "typing import Optional from typing import Union import pandas as pd from cata", "num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna()", "log_ode: whether ot not to plot ode data. log_network: whether ot not to", "plot. \"\"\" # can use arbitrary dataframe since columns will be the same.", "_make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make", "fashion i.e. all into one dataframe. \"\"\" def __init__( self, save_folder: str, num_steps:", "For case when logging is done in 'unified' fashion i.e. 
all into one", "not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None:", "import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc. For case", "when logging is done in 'unified' fashion i.e. all into one dataframe. \"\"\"", "into memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if", "save_folder: path to folder for saving plots. num_steps: total number of steps in", "data_type in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col, tag_group_name=group_name, data_collection=data_collection,", "= network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant", "self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None and self._network_logger_path", "not None and self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger,", "index_col=0) if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None:", "None and self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, },", "bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor. Args: save_folder: path to", "folder for saving plots. num_steps: total number of steps in the training run", "...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0]", "self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is", "self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self):", "in range(num_columns): graph_index = (row) * num_columns + col if graph_index < num_graphs:", ") if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if", "group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns =", "since columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap,", "dataframes. Here, in the unified case, full dataset is loaded into memory. \"\"\"", "path to save the plot. \"\"\" # can use arbitrary dataframe since columns", "group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names", "saving plots. num_steps: total number of steps in the training run (used for", "data: mapping from type of results (ode, network etc.) 
to dataframes with results.", "data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None and self._network_logger_path is", "height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for col in range(num_columns):", "or not to plot overlaps (or just errors). log_ode: whether ot not to", "[error, overlap, ...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2,", "save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str,", "group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for key in", "results (e.g. ode or network or both). Args: data: mapping from type of", "case, full dataset is loaded into memory. \"\"\" if self._ode_logger_path is not None:", "not to plot network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__(", "is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger", "num_rows=num_rows ) for row in range(num_rows): for col in range(num_columns): graph_index = (row)", "for a set of results (e.g. ode or network or both). Args: data:", "i.e. all into one dataframe. \"\"\" def __init__( self, save_folder: str, num_steps: int,", "graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys", "{ data_type: {key: data[data_type][key].dropna() for key in keys} for data_type in data.keys() }", "the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names = list(tag_groups.keys())", "self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots", "data_type: {key: data[data_type][key].dropna() for key in keys} for data_type in data.keys() } fig", "list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values())", "the unified case, full dataset is loaded into memory. \"\"\" if self._ode_logger_path is", "print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection", "(used for scaling axes). log_overlaps: whether or not to plot overlaps (or just", "-> None: \"\"\"Orchestration method for plotting ode logs, network logs, or both.\"\"\" if", "pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) ->", "plot overlaps (or just errors). log_ode: whether ot not to plot ode data.", "unified case, full dataset is loaded into memory. \"\"\" if self._ode_logger_path is not", "Args: data: mapping from type of results (ode, network etc.) to dataframes with", "plotting ode logs, network logs, or both.\"\"\" if self._ode_logger_path is not None: self._make_plot(", "the plot. 
\"\"\" # can use arbitrary dataframe since columns will be the", "self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for col in", "num_columns + col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs))", "def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None:", "self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None and self._network_logger_path is not", "make_plots(self) -> None: \"\"\"Orchestration method for plotting ode logs, network logs, or both.\"\"\"", "data from relevant dataframes. Here, in the unified case, full dataset is loaded", "\"\"\"Orchestration method for plotting ode logs, network logs, or both.\"\"\" if self._ode_logger_path is", "if self._ode_logger_path is not None and self._network_logger_path is not None: self._make_plot( data={ constants.ODE:", "= self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for col", "Dict from typing import Optional from typing import Union import pandas as pd", "self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder,", "num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton(", "just errors). log_ode: whether ot not to plot ode data. log_network: whether ot", "import pandas as pd from cata import constants from cata.plotters import base_plotter class", "\"\"\" def __init__( self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path:", "constructor. Args: save_folder: path to folder for saving plots. num_steps: total number of", "dataframes with results. save_path: path to save the plot. \"\"\" # can use", "for plotting generalisation errors, overlaps etc. For case when logging is done in", "= list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names =", "not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger =", "__init__( self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ):", "in keys} for data_type in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row,", "to plot ode data. log_network: whether ot not to plot network data. \"\"\"", "import Dict from typing import Optional from typing import Union import pandas as", "Optional from typing import Union import pandas as pd from cata import constants", "log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor. Args: save_folder: path", "ot not to plot network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path", "# e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) #", "overlap, ...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...],", "for data_type in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col, tag_group_name=group_name,", "type of results (ode, network etc.) to dataframes with results. 
save_path: path to", "into one dataframe. \"\"\" def __init__( self, save_folder: str, num_steps: int, log_overlaps: bool,", "self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self,", "not to plot overlaps (or just errors). log_ode: whether ot not to plot", "data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data:", "case when logging is done in 'unified' fashion i.e. all into one dataframe.", "from typing import Optional from typing import Union import pandas as pd from", "network_log_path: str, ): \"\"\" Class constructor. Args: save_folder: path to folder for saving", "in the unified case, full dataset is loaded into memory. \"\"\" if self._ode_logger_path", "columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...]", "log_overlaps: whether or not to plot overlaps (or just errors). log_ode: whether ot", "Here, in the unified case, full dataset is loaded into memory. \"\"\" if", "logging is done in 'unified' fashion i.e. all into one dataframe. \"\"\" def", "row in range(num_rows): for col in range(num_columns): graph_index = (row) * num_columns +", "results (ode, network etc.) to dataframes with results. save_path: path to save the", ") def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) ->", "to dataframes with results. save_path: path to save the plot. \"\"\" # can", "if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index]", "range(num_columns): graph_index = (row) * num_columns + col if graph_index < num_graphs: print(\"Plotting", "errors). log_ode: whether ot not to plot ode data. log_network: whether ot not", "ode logs, network logs, or both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE:", "= self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows", "to plot overlaps (or just errors). log_ode: whether ot not to plot ode", "= group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for key in keys} for", "for plotting ode logs, network logs, or both.\"\"\" if self._ode_logger_path is not None:", "self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2,", "for saving plots. num_steps: total number of steps in the training run (used", "os from typing import Dict from typing import Optional from typing import Union", "# e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec", "* num_columns + col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1,", "num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor. Args:", "from relevant dataframes. 
Here, in the unified case, full dataset is loaded into", "pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for plotting ode logs, network logs,", "network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant dataframes.", "cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc. For", "pd from cata import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for", "in the training run (used for scaling axes). log_overlaps: whether or not to", "of steps in the training run (used for scaling axes). log_overlaps: whether or", "relevant dataframes. Here, in the unified case, full dataset is loaded into memory.", "is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not", "axes). log_overlaps: whether or not to plot overlaps (or just errors). log_ode: whether", "(or just errors). log_ode: whether ot not to plot ode data. log_network: whether", "log_network: whether ot not to plot network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path", "data_collection = { data_type: {key: data[data_type][key].dropna() for key in keys} for data_type in", "arbitrary dataframe since columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g.", "plot network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps,", "self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]],", "results. save_path: path to save the plot. \"\"\" # can use arbitrary dataframe", "= group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for key", "str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor.", "dataset is loaded into memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger =", "col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name =", "spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for", "both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if", "= len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4,", "= { data_type: {key: data[data_type][key].dropna() for key in keys} for data_type in data.keys()", "cata import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation", "whether or not to plot overlaps (or just errors). log_ode: whether ot not", "= self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) # e.g. 
[[error_1,", "} fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col, tag_group_name=group_name, data_collection=data_collection, ) fig.savefig(save_path, dpi=100)", "(row) * num_columns + col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index +", "if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path", "ot not to plot ode data. log_network: whether ot not to plot network", "set of results (e.g. ode or network or both). Args: data: mapping from", "self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame,", "training run (used for scaling axes). log_overlaps: whether or not to plot overlaps", "[[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs", "e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec =", "not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def", "None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot(", "scaling axes). log_overlaps: whether or not to plot overlaps (or just errors). log_ode:", "is loaded into memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path,", "ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data", "data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps )", "None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot(", "keys} for data_type in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col,", "memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path", "save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF),", "def __init__( self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str,", ") def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in the unified case,", "group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for key in keys} for data_type", "dataframe since columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error,", "base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc. For case when", "UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc. 
For case when logging is", "overlaps (or just errors). log_ode: whether ot not to plot ode data. log_network:", "self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\"", "import Optional from typing import Union import pandas as pd from cata import", "from type of results (ode, network etc.) to dataframes with results. save_path: path", "num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for col in range(num_columns): graph_index =", "constants.NETWORK_PDF), ) if self._ode_logger_path is not None and self._network_logger_path is not None: self._make_plot(", "will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys())) # e.g. [error, overlap, ...] group_names", "error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs =", "in 'unified' fashion i.e. all into one dataframe. \"\"\" def __init__( self, save_folder:", "method for plotting ode logs, network logs, or both.\"\"\" if self._ode_logger_path is not", "not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None", "num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index]", "Args: save_folder: path to folder for saving plots. num_steps: total number of steps", "from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors, overlaps etc.", "for row in range(num_rows): for col in range(num_columns): graph_index = (row) * num_columns", "is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for", "data[data_type][key].dropna() for key in keys} for data_type in data.keys() } fig = self._plot_scalar(", "for key in keys} for data_type in data.keys() } fig = self._plot_scalar( fig=fig,", "self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for row in", "super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here,", "mapping from type of results (ode, network etc.) to dataframes with results. save_path:", "network or both). Args: data: mapping from type of results (ode, network etc.)", "def make_plots(self) -> None: \"\"\"Orchestration method for plotting ode logs, network logs, or", "graph_index = (row) * num_columns + col if graph_index < num_graphs: print(\"Plotting graph", "is done in 'unified' fashion i.e. all into one dataframe. \"\"\" def __init__(", "done in 'unified' fashion i.e. all into one dataframe. \"\"\" def __init__( self,", "network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps", "(ode, network etc.) to dataframes with results. save_path: path to save the plot.", "of results (ode, network etc.) to dataframes with results. save_path: path to save", "str, ): \"\"\" Class constructor. 
Args: save_folder: path to folder for saving plots.", "network logs, or both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder,", "data. log_network: whether ot not to plot network data. \"\"\" self._ode_logger_path = ode_log_path", "data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger},", "self._ode_logger_path is not None and self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger,", "ode or network or both). Args: data: mapping from type of results (ode,", "'unified' fashion i.e. all into one dataframe. \"\"\" def __init__( self, save_folder: str,", "can use arbitrary dataframe since columns will be the same. tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys()))", "is not None and self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM:", "# can use arbitrary dataframe since columns will be the same. tag_groups =", "len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5,", "key in keys} for data_type in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec,", "int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class constructor. Args: save_folder:", "if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not", "list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig,", "save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in", "constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, )", "save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), ) if self._ode_logger_path is not None and self._network_logger_path is not None:", "num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows ) for", "None: \"\"\"Orchestration method for plotting ode logs, network logs, or both.\"\"\" if self._ode_logger_path", "pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots for a set of results", "logs, or both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF),", "(e.g. ode or network or both). Args: data: mapping from type of results", "etc. For case when logging is done in 'unified' fashion i.e. all into", "whether ot not to plot network data. \"\"\" self._ode_logger_path = ode_log_path self._network_logger_path =", "full dataset is loaded into memory. \"\"\" if self._ode_logger_path is not None: self._ode_logger", "to save the plot. 
\"\"\" # can use arbitrary dataframe since columns will", "= pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self)", "num_rows = self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns,", "constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF), ) def _make_plot( self, data: Dict[str,", "and self._network_logger_path is not None: self._make_plot( data={ constants.ODE: self._ode_logger, constants.SIM: self._network_logger, }, save_path=os.path.join(self._save_folder,", "path to folder for saving plots. num_steps: total number of steps in the", "self._make_plot( data={constants.ODE: self._ode_logger}, save_path=os.path.join(self._save_folder, constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot( data={constants.SIM:", ") for row in range(num_rows): for col in range(num_columns): graph_index = (row) *", "logs, network logs, or both.\"\"\" if self._ode_logger_path is not None: self._make_plot( data={constants.ODE: self._ode_logger},", "Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots for a", "graph {}/{}\".format(graph_index + 1, num_graphs)) group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection =", "etc.) to dataframes with results. save_path: path to save the plot. \"\"\" #", "Union import pandas as pd from cata import constants from cata.plotters import base_plotter", "plot ode data. log_network: whether ot not to plot network data. \"\"\" self._ode_logger_path", "Union[pd.DataFrame, Dict[str, pd.DataFrame]]], save_path: str, ) -> None: \"\"\"Make plots for a set", "the training run (used for scaling axes). log_overlaps: whether or not to plot", "e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g.", "import Union import pandas as pd from cata import constants from cata.plotters import", "\"\"\" if self._ode_logger_path is not None: self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is", "group_name = group_names[graph_index] keys = group_key_names[graph_index] data_collection = { data_type: {key: data[data_type][key].dropna() for", "width=5, num_columns=num_columns, num_rows=num_rows ) for row in range(num_rows): for col in range(num_columns): graph_index", "\"\"\"Make plots for a set of results (e.g. ode or network or both).", "= pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for plotting ode logs, network", "in data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col, tag_group_name=group_name, data_collection=data_collection, )", "dataframe. \"\"\" def __init__( self, save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str,", "num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup data from relevant dataframes. Here, in the", "steps in the training run (used for scaling axes). log_overlaps: whether or not", "to plot network data. 
\"\"\" self._ode_logger_path = ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder,", "self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0) if self._network_logger_path is not None: self._network_logger = pd.read_csv(self._network_logger_path) def", "in range(num_rows): for col in range(num_columns): graph_index = (row) * num_columns + col", "[overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups) num_rows", "not None: self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for plotting", "save_folder: str, num_steps: int, log_overlaps: bool, ode_log_path: str, network_log_path: str, ): \"\"\" Class", "plots. num_steps: total number of steps in the training run (used for scaling", "for col in range(num_columns): graph_index = (row) * num_columns + col if graph_index", "Class constructor. Args: save_folder: path to folder for saving plots. num_steps: total number", "None: \"\"\"Make plots for a set of results (e.g. ode or network or", "import os from typing import Dict from typing import Optional from typing import", "= (row) * num_columns + col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index", ") if self._ode_logger_path is not None and self._network_logger_path is not None: self._make_plot( data={", "+ col if graph_index < num_graphs: print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs)) group_name", "self._network_logger = pd.read_csv(self._network_logger_path) def make_plots(self) -> None: \"\"\"Orchestration method for plotting ode logs,", "e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...], [overlap_1,", "= ode_log_path self._network_logger_path = network_log_path super().__init__( save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps ) def _setup_data(self): \"\"\"Setup", "typing import Dict from typing import Optional from typing import Union import pandas", "constants.ODE_PDF), ) if self._network_logger_path is not None: self._make_plot( data={constants.SIM: self._network_logger}, save_path=os.path.join(self._save_folder, constants.NETWORK_PDF), )", "...], [overlap_1, overlap_2, ...], ...] group_key_names = list(tag_groups.values()) # e.g. num_graphs = len(tag_groups)", "# e.g. [error, overlap, ...] group_names = list(tag_groups.keys()) # e.g. [[error_1, error_2, ...],", "import constants from cata.plotters import base_plotter class UnifiedPlotter(base_plotter.BasePlotter): \"\"\"Class for plotting generalisation errors,", "data.keys() } fig = self._plot_scalar( fig=fig, spec=spec, row=row, col=col, tag_group_name=group_name, data_collection=data_collection, ) fig.savefig(save_path,", "self.GRAPH_LAYOUT[0] num_columns = self.GRAPH_LAYOUT[1] fig, spec = self._get_figure_skeleton( height=4, width=5, num_columns=num_columns, num_rows=num_rows )" ]
[]
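A usage sketch for the plotter above. The paths and the num_steps value are hypothetical; only the constructor signature and make_plots() come from the class itself.

# Hypothetical driver: overlay ODE predictions and network simulation logs.
plotter = UnifiedPlotter(
    save_folder="results/plots",        # made-up output folder
    num_steps=100_000,                  # illustrative training length
    log_overlaps=True,
    ode_log_path="results/ode_logs.csv",        # made-up CSV paths
    network_log_path="results/network_logs.csv",
)
plotter.make_plots()  # writes the ODE, network, and overlay PDFs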
[ "Use '\\n'.join(settings_list). affil (str, optional): Comma-separated string of author affiliations to their repos.", "GH_TOKEN = os.environ[\"GH_TOKEN\"] headers = {\"Authorization\": f\"token {GH_TOKEN}\"} def query_gh_gpl_api(query: str) -> dict:", "file was set as '{config_path}' but no such file exists.\" ) elif config_path:", "import yaml if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"] headers", "} }\"\"\".replace( \"{settings}\", settings ).replace( \"{affil}\", affil ) def load_config(config_path: str = None)", "not os.path.exists(config_path): raise FileNotFoundError( f\"Path to config file was set as '{config_path}' but", "str) -> dict: \"\"\"Query the GitHub GraphQL API. Args: query (str): Multi-line query", "Defaults to \"OWNER\". Returns: str: GraphQL query. \"\"\" return \"\"\"{ viewer { repositories(first:", "additional logins of your GitHub organizations to query for repos - boolean whether", "100) { nodes { login repositories(first: 100) { nodes { name nameWithOwner isArchived", "if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"] headers = {\"Authorization\":", "returned by the API. \"\"\" response = requests.post( \"https://api.github.com/graphql\", json={\"query\": query}, headers=headers ).json()", "load_config(config_path: str = None) -> tuple[dict[str, Any], list[str], bool]: \"\"\"Load .repo-config.(yml|yaml). Returns: tuple[dict[str,", ") def load_config(config_path: str = None) -> tuple[dict[str, Any], list[str], bool]: \"\"\"Load .repo-config.(yml|yaml).", "well \"\"\" config = {} if config_path and not os.path.exists(config_path): raise FileNotFoundError( f\"Path", "data returned by the API. \"\"\" response = requests.post( \"https://api.github.com/graphql\", json={\"query\": query}, headers=headers", "nodes { login repositories(first: 100) { nodes { name nameWithOwner isArchived isFork {settings}", "requests.post( \"https://api.github.com/graphql\", json={\"query\": query}, headers=headers ).json() if \"errors\" in response: err = response[\"errors\"][0][\"message\"]", "= \"OWNER\") -> str: \"\"\"Construct GraphQL query from settings list. Args: settings (str):", "according to the GraphQL API, separated by new lines. Use '\\n'.join(settings_list). affil (str,", "query from settings list. Args: settings (str): Names of repo settings according to", "} } } } }\"\"\".replace( \"{settings}\", settings ).replace( \"{affil}\", affil ) def load_config(config_path:", "yaml if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"] headers =", "def pretty_print(dic: dict) -> None: \"\"\"Pretty print a dictionary in YAML format. 
Useful", "requests import yaml if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"]", "config[\"settings\"] orgs = config[\"orgs\"] or [] skipForks = config[\"skipForks\"] or True return settings,", "os.path.exists(path): with open(path) as file: config = yaml.safe_load(file.read()) if config == {}: raise", "exists.\" ) elif config_path: with open(config_path) as file: config = yaml.safe_load(file.read()) for path", "with open(config_path) as file: config = yaml.safe_load(file.read()) for path in (\".repo-config.yml\", \".repo-config.yaml\"): if", "affiliations: [{affil}]) { nodes { name nameWithOwner isArchived isFork {settings} } } organizations(first:", "to query for repos - boolean whether or not apply settings to repos", "repos you forked as well \"\"\" config = {} if config_path and not", "} } }\"\"\".replace( \"{settings}\", settings ).replace( \"{affil}\", affil ) def load_config(config_path: str =", "by new lines. Use '\\n'.join(settings_list). affil (str, optional): Comma-separated string of author affiliations", "affil (str, optional): Comma-separated string of author affiliations to their repos. One or", "optional.\" ) settings = config[\"settings\"] orgs = config[\"orgs\"] or [] skipForks = config[\"skipForks\"]", "dictionary in YAML format. Useful for development and debugging. \"\"\" print(yaml.dump(dic)) def get_gql_query(settings:", "settings ).replace( \"{affil}\", affil ) def load_config(config_path: str = None) -> tuple[dict[str, Any],", "for an example \" \"config file. All fields except 'settings' are optional.\" )", "error '{err}'.\") else: return response[\"data\"] def pretty_print(dic: dict) -> None: \"\"\"Pretty print a", "Any], list[str], bool]: - Dictionary of GitHub settings to apply to all your", "are optional.\" ) settings = config[\"settings\"] orgs = config[\"orgs\"] or [] skipForks =", "print a dictionary in YAML format. Useful for development and debugging. \"\"\" print(yaml.dump(dic))", "= {\"Authorization\": f\"token {GH_TOKEN}\"} def query_gh_gpl_api(query: str) -> dict: \"\"\"Query the GitHub GraphQL", "from gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"] headers = {\"Authorization\": f\"token {GH_TOKEN}\"}", "''' Raises: Exception: If the query returned an error message. Returns: dict: The", "== {}: raise ValueError( \"No config file could be found. See https://git.io/JWa5o for", "login } } ''' Raises: Exception: If the query returned an error message.", "tuple[dict[str, Any], list[str], bool]: - Dictionary of GitHub settings to apply to all", "{GH_TOKEN}\"} def query_gh_gpl_api(query: str) -> dict: \"\"\"Query the GitHub GraphQL API. Args: query", "os.path.exists(config_path): raise FileNotFoundError( f\"Path to config file was set as '{config_path}' but no", "https://git.io/JWa5o for an example \" \"config file. All fields except 'settings' are optional.\"", "triple-quotes. Minimal example: ''' { viewer { login } } ''' Raises: Exception:", "in response: err = response[\"errors\"][0][\"message\"] raise Exception(f\"Request failed with error '{err}'.\") else: return", "\"\"\" print(yaml.dump(dic)) def get_gql_query(settings: str, affil: str = \"OWNER\") -> str: \"\"\"Construct GraphQL", "error message. Returns: dict: The data returned by the API. 
\"\"\" response =", "os from typing import Any import requests import yaml if os.path.exists(\"gh_token.py\"): from gh_token", "import Any import requests import yaml if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else:", "FileNotFoundError( f\"Path to config file was set as '{config_path}' but no such file", "- boolean whether or not apply settings to repos you forked as well", "= {} if config_path and not os.path.exists(config_path): raise FileNotFoundError( f\"Path to config file", "config_path: with open(config_path) as file: config = yaml.safe_load(file.read()) for path in (\".repo-config.yml\", \".repo-config.yaml\"):", "new lines. Use '\\n'.join(settings_list). affil (str, optional): Comma-separated string of author affiliations to", "for development and debugging. \"\"\" print(yaml.dump(dic)) def get_gql_query(settings: str, affil: str = \"OWNER\")", "be found. See https://git.io/JWa5o for an example \" \"config file. All fields except", "response[\"errors\"][0][\"message\"] raise Exception(f\"Request failed with error '{err}'.\") else: return response[\"data\"] def pretty_print(dic: dict)", "else: return response[\"data\"] def pretty_print(dic: dict) -> None: \"\"\"Pretty print a dictionary in", "for repos - boolean whether or not apply settings to repos you forked", "of author affiliations to their repos. One or several of OWNER, COLLABORATOR, ORGANIZATION_MEMBER.", "(str): Multi-line query string. Use triple-quotes. Minimal example: ''' { viewer { login", "config = yaml.safe_load(file.read()) for path in (\".repo-config.yml\", \".repo-config.yaml\"): if os.path.exists(path): with open(path) as", "{ login repositories(first: 100) { nodes { name nameWithOwner isArchived isFork {settings} }", "headers=headers ).json() if \"errors\" in response: err = response[\"errors\"][0][\"message\"] raise Exception(f\"Request failed with", "was set as '{config_path}' but no such file exists.\" ) elif config_path: with", "gh_token import GH_TOKEN else: GH_TOKEN = os.environ[\"GH_TOKEN\"] headers = {\"Authorization\": f\"token {GH_TOKEN}\"} def", "{settings} } } organizations(first: 100) { nodes { login repositories(first: 100) { nodes", "raise FileNotFoundError( f\"Path to config file was set as '{config_path}' but no such", "no such file exists.\" ) elif config_path: with open(config_path) as file: config =", "Names of repo settings according to the GraphQL API, separated by new lines.", "nodes { name nameWithOwner isArchived isFork {settings} } } organizations(first: 100) { nodes", ").replace( \"{affil}\", affil ) def load_config(config_path: str = None) -> tuple[dict[str, Any], list[str],", "repos. One or several of OWNER, COLLABORATOR, ORGANIZATION_MEMBER. Defaults to \"OWNER\". Returns: str:", "Any import requests import yaml if os.path.exists(\"gh_token.py\"): from gh_token import GH_TOKEN else: GH_TOKEN", "development and debugging. \"\"\" print(yaml.dump(dic)) def get_gql_query(settings: str, affil: str = \"OWNER\") ->", "to \"OWNER\". Returns: str: GraphQL query. \"\"\" return \"\"\"{ viewer { repositories(first: 100,", "settings according to the GraphQL API, separated by new lines. Use '\\n'.join(settings_list). affil", "{ name nameWithOwner isArchived isFork {settings} } } } } } }\"\"\".replace( \"{settings}\",", "nameWithOwner isArchived isFork {settings} } } organizations(first: 100) { nodes { login repositories(first:", "orgs = config[\"orgs\"] or [] skipForks = config[\"skipForks\"] or True return settings, orgs,", "API, separated by new lines. 
def pretty_print(dic: dict) -> None:
    """Pretty print a dictionary in YAML format.

    Useful for development and debugging.
    """
    print(yaml.dump(dic))


def get_gql_query(settings: str, affil: str = "OWNER") -> str:
    """Construct GraphQL query from settings list.

    Args:
        settings (str): Names of repo settings according to the GraphQL API,
            separated by new lines. Use '\n'.join(settings_list).
        affil (str, optional): Comma-separated string of author affiliations to their
            repos. One or several of OWNER, COLLABORATOR, ORGANIZATION_MEMBER.
            Defaults to "OWNER".

    Returns:
        str: GraphQL query.
    """
    return """{
        viewer {
            repositories(first: 100, affiliations: [{affil}]) {
                nodes {
                    name
                    nameWithOwner
                    isArchived
                    isFork
                    {settings}
                }
            }
            organizations(first: 100) {
                nodes {
                    login
                    repositories(first: 100) {
                        nodes {
                            name
                            nameWithOwner
                            isArchived
                            isFork
                            {settings}
                        }
                    }
                }
            }
        }
    }""".replace(
        "{settings}", settings
    ).replace(
        "{affil}", affil
    )
def load_config(config_path: str = None) -> tuple[dict[str, Any], list[str], bool]:
    """Load .repo-config.(yml|yaml).

    Returns:
        tuple[dict[str, Any], list[str], bool]:
            - Dictionary of GitHub settings to apply to all your repos
            - list of additional logins of your GitHub organizations to query for repos
            - boolean whether or not to apply settings to repos you forked as well
    """
    config = {}

    if config_path and not os.path.exists(config_path):
        raise FileNotFoundError(
            f"Path to config file was set as '{config_path}' but no such file exists."
        )
    elif config_path:
        with open(config_path) as file:
            config = yaml.safe_load(file.read())
    else:
        for path in (".repo-config.yml", ".repo-config.yaml"):
            if os.path.exists(path):
                with open(path) as file:
                    config = yaml.safe_load(file.read())

    if config == {}:
        raise ValueError(
            "No config file could be found. See https://git.io/JWa5o for an example "
            "config file. All fields except 'settings' are optional."
        )

    settings = config["settings"]
    # use .get() since all fields except 'settings' are optional, and default skipForks
    # to True only when the key is missing ('config["skipForks"] or True' would silently
    # discard an explicit False)
    orgs = config.get("orgs") or []
    skipForks = config.get("skipForks", True)

    return settings, orgs, skipForks
# ------------------------------------------------------------------------------------------------ #
def ImportEssentialityData(fileName):
    # Not yet ready for prime time
    # Import a defined format essentiality data file
    # Assumes that data is in the format: locus tag, gene name, essentiality
    from .utils import ParseCSVLine

    fileHandle = open(fileName, 'r')
    data = fileHandle.readlines()
    fileHandle.close()

    dataDict = {}
    i = 0
    while i < len(data):
        # Ignore comment lines
        if data[i][0] != '#':
            dataLine = ParseCSVLine(data[i])
            dataDict[dataLine[0]] = [dataLine[1], dataLine[2]]
        i += 1

    return dataDict
# ------------------------------------------------------------------------------------------------ #


# ------------------------------------------------------------------------------------------------ #
def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray, headers):
    # Not yet ready for prime time
    # Build essentiality data dict that is keyed by locus tag
    # Note: the original body referenced a bare `headers` name that was never defined in
    # this function, so the column headers of dataArray are now an explicit argument
    headersWithoutSysName = []
    i = 0
    while i < len(headers):
        if headers[i] != 'sysName':
            headersWithoutSysName.append(headers[i])
        i += 1

    dataDict = {}
    for line in dataArray:
        dataDict[line['sysName']] = {}
        for header in headersWithoutSysName:
            dataDict[line['sysName']][header] = line[header]

    return dataDict
# ------------------------------------------------------------------------------------------------ #


# ------------------------------------------------------------------------------------------------ #
def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):
    # Not yet ready for prime time
    i = 0
    cdsDict = {}
    while i < len(cdsFeatures):
        locusTag = cdsFeatures[i].tagDict['locus_tag'][0]
        cdsDict[locusTag] = cdsFeatures[i]
        i += 1
    return cdsDict
# ------------------------------------------------------------------------------------------------ #
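# Illustrative sketch only: a tiny essentiality file in the comma-separated format that
# ImportEssentialityData() expects (locus tag, gene name, essentiality). The locus tags
# and file name are invented for the example.
#
#   # locus_tag,gene_name,essentiality
#   b0001,thrL,Dispensable
#   b0002,thrA,Essential
#
#   dataDict = ImportEssentialityData('essentiality.csv')
#   dataDict['b0002']   ->   ['thrA', 'Essential']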
# ------------------------------------------------------------------------------------------------ #
def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants):

    from numpy.random import choice
    import pdb

    featureHitCountDict = {}
    for feature in hittableFeatures:
        featureHitCountDict[feature] = 0

    featuresHitAtLeastOnce = 0
    featuresHitAtLeastOnceVersusMutant = []

    i = 1
    while i <= maxMutants:
        randomCoord = int(choice(hittableTransposonCoords))
        featuresToBeHit = transposonCoordToFeatureDict[randomCoord]

        # A picked coordinate only counts if none of the features it lands in is essential
        isAnyFeatureIncludingThisCoordNotHittable = False
        for featureToBeHit in featuresToBeHit:
            if featureToBeHit in notHittableFeatures:
                isAnyFeatureIncludingThisCoordNotHittable = True

        if not isAnyFeatureIncludingThisCoordNotHittable:
            for featureToBeHit in featuresToBeHit:
                try:
                    featureHitCountDict[featureToBeHit] += 1
                except KeyError:
                    pdb.set_trace()
                if featureHitCountDict[featureToBeHit] == 1:
                    featuresHitAtLeastOnce += 1

        featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)
        i += 1

    return featuresHitAtLeastOnceVersusMutant
# ------------------------------------------------------------------------------------------------ #
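# Usage sketch (variable names taken from SimulateMultiplePickings() below, which builds
# them from the XML file): a single simulated picking run of 1000 mutants would be
#
#   curve = SimulatePicking(hittableFeatures, notHittableFeatures,
#                           hittableTransposonCoords, transposonCoordToFeatureDict, 1000)
#
# curve[k-1] is then the number of unique non-essential features hit after picking k mutants.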
# ------------------------------------------------------------------------------------------------ #
def SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):
    # unique and intersect1d were historically imported from scipy; they live in numpy,
    # and recent scipy versions no longer re-export them
    from numpy import mean, std, arange, unique, intersect1d
    import xml.etree.ElementTree as ET

    transposonCoordToFeatureDict = {}
    hittableFeatures = []
    hittableTransposonCoords = []
    notHittableTransposonCoords = []
    notHittableFeatures = []
    otherFeatures = []

    tree = ET.parse(transposonCoordToFeatureDictFile)
    root = tree.getroot()
    importedCoordsList = root.findall('coord')

    for coord in importedCoordsList:
        coordinate = int(coord.attrib['coord'])
        loci = coord.findall('locus')

        if coordinate not in transposonCoordToFeatureDict.keys():
            transposonCoordToFeatureDict[coordinate] = []

        for locus in loci:
            locusName = locus.attrib['locus']
            essentiality = locus.attrib['essentiality']
            transposonCoordToFeatureDict[coordinate].append(locusName)

            if essentiality == 'Dispensable':
                hittableTransposonCoords.append(coordinate)
                hittableFeatures.append(locusName)
            elif essentiality == 'Essential':
                notHittableFeatures.append(locusName)
                notHittableTransposonCoords.append(coordinate)
            else:
                otherFeatures.append(locusName)
                print(locusName)

    hittableFeatures = unique(hittableFeatures)
    hittableTransposonCoords = unique(hittableTransposonCoords)
    notHittableFeatures = unique(notHittableFeatures)
    otherFeatures = unique(otherFeatures)

    # Features listed as both dispensable and essential; useful as a sanity check
    intersection = intersect1d(hittableFeatures, notHittableFeatures)

    # Simulate a number of picking runs
    featuresHitAtLeastOnceTrialsArray = []
    i = 0
    while i < numberOfTrials:
        featuresHitAtLeastOnceVersusMutant = \
        SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
        transposonCoordToFeatureDict, maxMutants)

        featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)
        i += 1

    # Collect together the data from the picking runs for calculation of mean and standard
    # deviation of number of hits picked
    i = 0
    collectedFeatureHitCountArray = []
    while i < len(featuresHitAtLeastOnceTrialsArray[0]):
        collectedFeatureHitCountArray.append([])
        i += 1

    i = 0
    while i < len(collectedFeatureHitCountArray):
        j = 0
        while j < len(featuresHitAtLeastOnceTrialsArray):
            collectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])
            j += 1
        i += 1

    averageFeatureHitCount = []
    sdFeatureHitCount = []
    featureHitCountUpperBound = []
    featureHitCountLowerBound = []

    # Calculate the mean and standard deviation of the number of unique features hit
    # at each pick from the trials
    i = 0
    while i < len(collectedFeatureHitCountArray):
        averageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))
        sdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))
        featureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])
        featureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])
        i += 1

    # Prepare an x axis (the number of mutants picked) for the output
    iAxis = arange(1, maxMutants+1, 1)
    noUniqHittableFeatures = len(hittableFeatures)

    return [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \
    featureHitCountLowerBound, noUniqHittableFeatures]
# ------------------------------------------------------------------------------------------------ #
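# Usage sketch (file name invented): run 100 trials of 2000 picks each, then compare the
# simulated mean curve against the closed-form Poisson estimate defined below.
#
#   [iAxis, avg, sd, upper, lower, nFeatures] = \
#   SimulateMultiplePickings('coordToFeature.xml', 100, 2000)
#   poisson = PoissonEstimateOfGenesHit(iAxis, nFeatures)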
file, \\ # so you only", "[] featureHitCountLowerBound = [] # Calculate the mean and standard deviation of the", "time i = 0 cdsDict = {} while i < len(cdsFeatures): locusTag =", "cdsDict # ------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------ # def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \\ transposonCoordToFeatureDict,", "in importedCoordsKeys: transposonCoordToFeatureDict[coordinate] = [] for locus in loci: locusName = locus.attrib['locus'] essentiality", "tree.getroot() importedCoordsList = root.findall('coord') for coord in importedCoordsList: coordinate = int(coord.attrib['coord']) loci =", "mean and standard # deviation of number of hits picked i = 0", "is in the format: locus tag, gene name, essentiality from .utils import ParseCSVLine", "\\ transposonCoordToFeatureDict, maxMutants): from numpy.random import choice import pdb nonEssentialGeneCount = len(hittableFeatures) featureHitCountDict", "data = fileHandle.readlines() dataDict = {} i = 0 while i < len(data):", "------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------ # def FindATandTAPositions2(genomeFile, format='genbank'): # Does the same thing", "can work with a GenBank or a Fasta file, \\ # so you", "coordinate not in importedCoordsKeys: transposonCoordToFeatureDict[coordinate] = [] for locus in loci: locusName =", "exp, array, float uniqueGenesHit = [] i = 0 while i < len(iAxis):", "pdb nonEssentialGeneCount = len(hittableFeatures) featureHitCountDict = {} for feature in hittableFeatures: featureHitCountDict[feature] =", "ATandTAPositions = [] atRegex = re.compile('(at|ta)', re.IGNORECASE) # set_trace() i = 0 while", "for featureToBeHit in featuresToBeHit: if featureToBeHit in notHittableFeatures: isAnyFeatureIncludingThisCoordNotHittable = True if isAnyFeatureIncludingThisCoordNotHittable", "except: pdb.set_trace() if featureHitCountDict[featureToBeHit] == 1: featuresHitAtLeastOnce += 1 featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce) i += 1", "------------------------------------------------------------------------------------------------ # def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures): # Not yet ready for prime time i =", "= len(hittableFeatures) featureHitCountDict = {} for feature in hittableFeatures: featureHitCountDict[feature] = 0 featuresHitAtLeastOnce", "featureHitCountDict = {} for feature in hittableFeatures: featureHitCountDict[feature] = 0 featuresHitAtLeastOnce = 0", "xml.etree.ElementTree as ET import pdb transposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r') transposonCoordToFeatureDict = {} hittableFeatures", "averageFeatureHitCount = [] sdFeatureHitCount = [] featureHitCountUpperBound = [] featureHitCountLowerBound = [] #", "while i < len(iAxis): ans = noUniqHittableFeatures*(1-exp(-iAxis[i]/noUniqHittableFeatures)) uniqueGenesHit.append(ans) i += 1 uniqueGenesHit =", "locusTag = cdsFeatures[i].tagDict['locus_tag'][0] cdsDict[locusTag] = cdsFeatures[i] i += 1 return cdsDict # ------------------------------------------------------------------------------------------------", "re.compile('(at|ta)', re.IGNORECASE) # set_trace() i = 0 while i < len(sequence) - 1:", "= cdsFeatures[i].tagDict['locus_tag'][0] cdsDict[locusTag] = 
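# Example of the expected input for ImportEssentialityData (illustrative file contents and
# locus tags, not shipped with this module):
#
#   # locusTag,geneName,essentiality
#   b0001,thrL,Dispensable
#   b0002,thrA,Essential
#
# which would yield dataDict['b0001'] == ['thrL', 'Dispensable'].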
# ------------------------------------------------------------------------------------------------ #
def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray):
    # Not yet ready for prime time
    # Build essentiality data dict that is keyed by locus tag
    # The header list is taken from the first record; every record is assumed to carry the
    # same fields, including a 'sysName' locus tag
    headers = list(dataArray[0].keys())

    headersWithoutSysName = []
    i = 0
    while i < len(headers):
        if headers[i] != 'sysName':
            headersWithoutSysName.append(headers[i])
        i += 1

    dataDict = {}
    for line in dataArray:
        dataDict[line['sysName']] = {}
        for header in headersWithoutSysName:
            dataDict[line['sysName']][header] = line[header]

    return dataDict
# ------------------------------------------------------------------------------------------------ #
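# Usage sketch for BuildEssentialityDictThatIsKeyedByLocusTag (hypothetical record; the
# function only requires dict-like records that share a 'sysName' key):
#
#   dataArray = [{'sysName': 'b0001', 'name': 'thrL', 'essentiality': 'Dispensable'}]
#   BuildEssentialityDictThatIsKeyedByLocusTag(dataArray)
#   # -> {'b0001': {'name': 'thrL', 'essentiality': 'Dispensable'}}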
# ------------------------------------------------------------------------------------------------ #
def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):
    # Not yet ready for prime time
    # Key each CDS feature object by its locus tag
    i = 0
    cdsDict = {}
    while i < len(cdsFeatures):
        locusTag = cdsFeatures[i].tagDict['locus_tag'][0]
        cdsDict[locusTag] = cdsFeatures[i]
        i += 1
    return cdsDict
# ------------------------------------------------------------------------------------------------ #
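# Assumed shape of a CDS feature (inferred from the lookup above, not defined in this file):
# each feature carries a tagDict attribute in which tagDict['locus_tag'] is a list whose
# first element is the locus tag, e.g.
#
#   feature.tagDict['locus_tag'] == ['b0001']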
# ------------------------------------------------------------------------------------------------ #
def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants):

    from numpy.random import choice
    import pdb

    nonEssentialGeneCount = len(hittableFeatures)

    featureHitCountDict = {}
    for feature in hittableFeatures:
        featureHitCountDict[feature] = 0

    featuresHitAtLeastOnce = 0
    featuresHitAtLeastOnceVersusMutant = []

    i = 1
    while i <= maxMutants:
        # Pick a random insertion coordinate and look up the features it lands in
        randomCoord = int(choice(hittableTransposonCoords))
        featuresToBeHit = transposonCoordToFeatureDict[randomCoord]

        # Only count the pick if none of the features at this coordinate is essential
        isAnyFeatureIncludingThisCoordNotHittable = False
        for featureToBeHit in featuresToBeHit:
            if featureToBeHit in notHittableFeatures:
                isAnyFeatureIncludingThisCoordNotHittable = True

        if not isAnyFeatureIncludingThisCoordNotHittable:
            for featureToBeHit in featuresToBeHit:
                try:
                    featureHitCountDict[featureToBeHit] += 1
                except KeyError:
                    # Debug trap retained from the original: a hittable coordinate mapped
                    # to a feature that was never registered as hittable
                    pdb.set_trace()

                if featureHitCountDict[featureToBeHit] == 1:
                    featuresHitAtLeastOnce += 1

        featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)
        i += 1

    return featuresHitAtLeastOnceVersusMutant
# ------------------------------------------------------------------------------------------------ #
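# Minimal usage sketch for SimulatePicking (toy data for illustration; real inputs are
# assembled by SimulateMultiplePickings below):
#
#   coords = [10, 250, 981]
#   coordToFeature = {10: ['geneA'], 250: ['geneB'], 981: ['geneA', 'geneC']}
#   curve = SimulatePicking(['geneA', 'geneB', 'geneC'], [], coords, coordToFeature, 100)
#   # curve[k] is the number of unique features hit at least once after k+1 picks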
# ------------------------------------------------------------------------------------------------ #
def SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):
    # unique and intersect1d now live in numpy rather than scipy
    from numpy import unique, intersect1d, mean, std, arange
    import xml.etree.ElementTree as ET

    transposonCoordToFeatureDict = {}
    hittableFeatures = []
    hittableTransposonCoords = []
    notHittableTransposonCoords = []
    notHittableFeatures = []
    otherFeatures = []

    tree = ET.parse(transposonCoordToFeatureDictFile)
    root = tree.getroot()
    importedCoordsList = root.findall('coord')

    for coord in importedCoordsList:
        coordinate = int(coord.attrib['coord'])
        loci = coord.findall('locus')
        importedCoordsKeys = transposonCoordToFeatureDict.keys()

        if coordinate not in importedCoordsKeys:
            transposonCoordToFeatureDict[coordinate] = []

        for locus in loci:
            locusName = locus.attrib['locus']
            essentiality = locus.attrib['essentiality']
            transposonCoordToFeatureDict[coordinate].append(locusName)

            if essentiality == 'Dispensable':
                hittableTransposonCoords.append(coordinate)
                hittableFeatures.append(locusName)
            elif essentiality == 'Essential':
                notHittableFeatures.append(locusName)
                notHittableTransposonCoords.append(coordinate)
            else:
                otherFeatures.append(locusName)
                print(locusName)

    hittableFeatures = unique(hittableFeatures)
    hittableTransposonCoords = unique(hittableTransposonCoords)
    notHittableFeatures = unique(notHittableFeatures)
    otherFeatures = unique(otherFeatures)
    # Sanity check: the hittable and not-hittable feature sets should not overlap
    intersection = intersect1d(hittableFeatures, notHittableFeatures)

    # Simulate a number of picking runs
    featuresHitAtLeastOnceTrialsArray = []
    i = 0
    while i < numberOfTrials:
        featuresHitAtLeastOnceVersusMutant = \
        SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
        transposonCoordToFeatureDict, maxMutants)

        featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)
        i += 1

    # Collect together the data from the picking runs for calculation of mean and standard
    # deviation of the number of hits picked
    i = 0
    collectedFeatureHitCountArray = []
    while i < len(featuresHitAtLeastOnceTrialsArray[0]):
        collectedFeatureHitCountArray.append([])
        i += 1

    i = 0
    while i < len(collectedFeatureHitCountArray):
        j = 0
        while j < len(featuresHitAtLeastOnceTrialsArray):
            collectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])
            j += 1
        i += 1

    averageFeatureHitCount = []
    sdFeatureHitCount = []
    featureHitCountUpperBound = []
    featureHitCountLowerBound = []

    # Calculate the mean and standard deviation of the number of unique features hit at each
    # pick from the trials
    i = 0
    while i < len(collectedFeatureHitCountArray):
        averageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))
        sdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))
        featureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])
        featureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])
        i += 1

    # Prepare an x axis (the number of mutants picked) for the output
    iAxis = arange(1, maxMutants+1, 1)
    noUniqHittableFeatures = len(hittableFeatures)

    return [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \
    featureHitCountLowerBound, noUniqHittableFeatures]
# ------------------------------------------------------------------------------------------------ #
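# Expected XML layout for the coordinate-to-feature map, reconstructed from the parsing code
# above (the root element's tag is not constrained by the code; attribute names are as read):
#
#   <coordinates>
#     <coord coord="10">
#       <locus locus="geneA" essentiality="Dispensable"/>
#     </coord>
#   </coordinates>
#
# Usage sketch (hypothetical file name):
#
#   results = SimulateMultiplePickings('coordToFeature.xml', 100, 5000)
#   # results = [iAxis, mean, sd, mean+sd, mean-sd, number of unique hittable features]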
# ------------------------------------------------------------------------------------------------ #
def PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):
    # Closed-form Poisson estimate of the number of unique genes hit after n picks:
    # uniqueGenesHit(n) = N*(1 - exp(-n/N)), where N is the number of unique hittable features
    from numpy import exp, array

    uniqueGenesHit = []
    i = 0
    while i < len(iAxis):
        ans = noUniqHittableFeatures*(1 - exp(-iAxis[i]/noUniqHittableFeatures))
        uniqueGenesHit.append(ans)
        i += 1

    uniqueGenesHit = array(uniqueGenesHit, float)
    return uniqueGenesHit
# ------------------------------------------------------------------------------------------------ #
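# Worked example of the Poisson estimate above: with N = 1000 hittable features and
# n = 1000 picks, the expected number of unique genes hit is
#
#   1000*(1 - exp(-1000/1000)) ~= 1000*0.6321 ~= 632 genes,
#
# i.e. one coverage-equivalent of picking finds about 63% of hittable features.
#
#   from numpy import arange
#   estimate = PoissonEstimateOfGenesHit(arange(1, 5001), 1000)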
# ------------------------------------------------------------------------------------------------ #
def FindATandTAPositions2(genomeFile, format='genbank'):
    # Does the same thing as FindATandTAPositions but can work with a GenBank or a Fasta file,
    # so you only need one file format
    import re

    if format == 'genbank':
        sequence = ImportGenBankSequence(genomeFile)
    elif format == 'fasta':
        sequence = ImportFastaSequence(genomeFile)

    ATandTAPositions = []
    atRegex = re.compile('(at|ta)', re.IGNORECASE)

    # Slide a 2-character window along the sequence; overlapping sites are all recorded
    i = 0
    while i < len(sequence) - 1:
        atMatch = atRegex.match(sequence[i:i+2])
        if atMatch is not None:
            ATandTAPositions.append(i+1)
        i += 1

    return [ATandTAPositions, sequence]
# ------------------------------------------------------------------------------------------------ #
"= {} hittableFeatures = [] hittableTransposonCoords = [] notHittableTransposonCoords = [] notHittableFeatures =", "SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \\ transposonCoordToFeatureDict, maxMutants) featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant) i += 1 # Collect together", "format == 'fasta': sequence = ImportFastaSequence(genomeFile) ATandTAPositions = [] atRegex = re.compile('(at|ta)', re.IGNORECASE)", "by locus tag essentialityDict = {} locusTags = [] headersWithoutSysName = [] i", "# Calculate the mean and standard deviation of the number of unique features", "featuresToBeHit = transposonCoordToFeatureDict[randomCoord] isAnyFeatureIncludingThisCoordNotHittable = False for featureToBeHit in featuresToBeHit: if featureToBeHit in", "ready for prime time # Import a defined format essentiality data file #", "sdFeatureHitCount, featureHitCountUpperBound, \\ featureHitCountLowerBound, noUniqHittableFeatures ] # ------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------ # def", "'r') transposonCoordToFeatureDict = {} hittableFeatures = [] hittableTransposonCoords = [] notHittableTransposonCoords = []", "1 return dataDict # ------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------ # def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray): # Not", "transposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r') transposonCoordToFeatureDict = {} hittableFeatures = [] hittableTransposonCoords = []", "of mutants picked) for the output iAxis = arange(1, maxMutants+1, 1) noUniqHittableFeatures =", "calculation of mean and standard # deviation of number of hits picked i", "{} locusTags = [] headersWithoutSysName = [] i = 0 while i <", "tag, gene name, essentiality from .utils import ParseCSVLine fileHandle = open(fileName, 'r') data", "featuresToBeHit: if featureToBeHit in notHittableFeatures: isAnyFeatureIncludingThisCoordNotHittable = True if isAnyFeatureIncludingThisCoordNotHittable == False: for", "# def ImportEssentialityData(fileName): # Not yet ready for prime time # Import a", "0 featuresHitAtLeastOnceVersusMutant = [] i = 1 while i <= maxMutants: randomCoord =", "+= 1 featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce) i += 1 return featuresHitAtLeastOnceVersusMutant # ------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------", "= locus.attrib['locus'] essentiality = locus.attrib['essentiality'] transposonCoordToFeatureDict[coordinate].append(locusName) if essentiality == 'Dispensable': hittableTransposonCoords.append(coordinate) hittableFeatures.append(locusName) elif", "picked) for the output iAxis = arange(1, maxMutants+1, 1) noUniqHittableFeatures = len(hittableFeatures) return", "or a Fasta file, \\ # so you only need one file format", "dataArray: dataDict[line['sysName']] = {} for header in headersWithoutSysName: dataDict[line['sysName']][header] = line[header] return dataDict", "\\ featureHitCountLowerBound, noUniqHittableFeatures ] # ------------------------------------------------------------------------------------------------ # # 
------------------------------------------------------------------------------------------------ # def PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):", "array, float uniqueGenesHit = [] i = 0 while i < len(iAxis): ans", "< len(headers): if headers[i] != 'sysName': headersWithoutSysName.append(headers[i]) i += 1 dataDict = {}", "hittableTransposonCoords = [] notHittableTransposonCoords = [] notHittableFeatures = [] otherFeatures = [] tree", "BuildEssentialityDictThatIsKeyedByLocusTag(dataArray): # Not yet ready for prime time # Build essentiality data dict", "if essentiality == 'Dispensable': hittableTransposonCoords.append(coordinate) hittableFeatures.append(locusName) elif essentiality == 'Essential': notHittableFeatures.append(locusName) notHittableTransposonCoords.append(coordinate) else:", "hittableFeatures: featureHitCountDict[feature] = 0 featuresHitAtLeastOnce = 0 featuresHitAtLeastOnceVersusMutant = [] i = 1", "1: featuresHitAtLeastOnce += 1 featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce) i += 1 return featuresHitAtLeastOnceVersusMutant # ------------------------------------------------------------------------------------------------ #", "[] # Calculate the mean and standard deviation of the number of unique", "picking runs featuresHitAtLeastOnceTrialsArray = [] i = 0 while i < numberOfTrials: featuresHitAtLeastOnceVersusMutant", "atMatch = atRegex.match(sequence[i:i+2]) if atMatch != None: ATandTAPositions.append(i+1) i += 1 return [ATandTAPositions,", "= {} for line in dataArray: dataDict[line['sysName']] = {} for header in headersWithoutSysName:" ]
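PoissonEstimateOfGenesHit is the closed-form counterpart of the simulation: after n uniformly random picks among noUniqHittableFeatures features, the expected number of distinct features hit is noUniqHittableFeatures * (1 - exp(-n / noUniqHittableFeatures)). A minimal sketch comparing the two outputs, assuming a hypothetical coordinate-map file coords.xml and matplotlib for plotting (neither appears in the original):

    import matplotlib.pyplot as plt

    # 'coords.xml' is a hypothetical input in the coord/locus XML format parsed above.
    iAxis, avgHits, sdHits, upper, lower, nFeatures = \
        SimulateMultiplePickings('coords.xml', numberOfTrials=100, maxMutants=5000)

    poisson = PoissonEstimateOfGenesHit(iAxis, nFeatures)

    plt.plot(iAxis, avgHits, label='simulated mean')
    plt.fill_between(iAxis, lower, upper, alpha=0.3, label='mean +/- 1 sd')
    plt.plot(iAxis, poisson, '--', label='Poisson estimate')
    plt.xlabel('Mutants picked')
    plt.ylabel('Unique non-essential features hit')
    plt.legend()
    plt.show()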
[ "with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment)", "def test_like(logged, post, cassette): if logged: response = like(post) assert response == {'status':", "logged: response = like(post) assert response == {'status': 'ok', 'post_id': post['id']} response =", "== {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def", "pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if", "== {'result': 'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert response ==", "test_like(logged, post, cassette): if logged: response = like(post) assert response == {'status': 'ok',", "assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with", "'2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response = save(post) assert response", "assert response == {'status': 'ok', 'post_id': post['id']} response = unlike(post) assert response ==", "pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text = 'awesome!' if", "assert response == {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError):", "import pytest from onegram.exceptions import NotSupportedError from onegram import follow, unfollow from onegram", "from onegram.exceptions import NotSupportedError from onegram import follow, unfollow from onegram import like,", "'ok', 'user_id': user['id']} response = unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']}", "'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert response == {'status': 'ok',", "response = like(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unlike(post)", "with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged: response = like(post) assert", "assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): save(post) with pytest.raises(NotSupportedError):", "save, unsave def test_follow(logged, user, cassette): if logged: response = follow(user) assert response", "else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'}", "= unlike(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post)", "{'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response =", "onegram import like, unlike from onegram import comment, uncomment from onegram import save,", "response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError):", "== {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment", "cassette): if logged: response = follow(user) assert response == {'result': 'following', 'status': 'ok',", "cassette): if logged: response = like(post) assert response == 
{'status': 'ok', 'post_id': post['id']}", "cassette): text = 'awesome!' if logged: commentary = comment(text, post) assert commentary['id'] assert", "'awesome!' if logged: commentary = comment(text, post) assert commentary['id'] assert commentary['text'] == text", "post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post,", "like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text = 'awesome!' if logged:", "'post_id': post['id']} response = unsave(post) assert response == {'status': 'ok', 'post_id': post['id']} else:", "cassette): if logged: response = save(post) assert response == {'status': 'ok', 'post_id': post['id']}", "like(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unlike(post) assert response", "response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): save(post) with pytest.raises(NotSupportedError): unsave(post)", "'ok', 'post_id': post['id']} response = unsave(post) assert response == {'status': 'ok', 'post_id': post['id']}", "assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert response", "test_follow(logged, user, cassette): if logged: response = follow(user) assert response == {'result': 'following',", "save(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unsave(post) assert response", "logged: response = save(post) assert response == {'status': 'ok', 'post_id': post['id']} response =", "'1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response = save(post)", "comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged,", "== 'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert response == {'status':", "onegram import save, unsave def test_follow(logged, user, cassette): if logged: response = follow(user)", "response = follow(user) assert response == {'result': 'following', 'status': 'ok', 'user_id': user['id']} response", "= save(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unsave(post) assert", "unsave(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): save(post) with", "assert commentary['text'] == text assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response", "{'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment =", "follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged: response = like(post)", "commentary = comment(text, post) assert commentary['id'] assert commentary['text'] == text assert commentary['status'] ==", "import follow, unfollow from onegram import like, unlike from onegram import comment, uncomment", "onegram import follow, unfollow from onegram import like, unlike from onegram import comment,", "response = unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError):", "user, cassette): if logged: response = follow(user) assert response == {'result': 'following', 'status':", "unlike(post) def test_comment(logged, post, cassette): text = 'awesome!' 
if logged: commentary = comment(text,", "comment, uncomment from onegram import save, unsave def test_follow(logged, user, cassette): if logged:", "post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text", "commentary['id'] assert commentary['text'] == text assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id']", "commentary['text'] == text assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response =", "'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1',", "== text assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary)", "with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette):", "response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post)", "'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert response == {'status': 'ok',", "post, cassette): text = 'awesome!' if logged: commentary = comment(text, post) assert commentary['id']", "post['id'] response = uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']} else: with", "user['id']} response = unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']} else: with", "{'status': 'ok', 'post_id': post['id']} response = unlike(post) assert response == {'status': 'ok', 'post_id':", "pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text = 'awesome!' if logged: commentary =", "def test_save(logged, post, cassette): if logged: response = save(post) assert response == {'status':", "assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert response == {'status': 'ok', 'post_id':", "uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response = save(post) assert response ==", "{'status': 'ok', 'post_id': post['id']} response = unsave(post) assert response == {'status': 'ok', 'post_id':", "with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text = 'awesome!'", "response = save(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unsave(post)", "from onegram import follow, unfollow from onegram import like, unlike from onegram import", "== post['id'] response = uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']} else:", "onegram import comment, uncomment from onegram import save, unsave def test_follow(logged, user, cassette):", "uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post)", "post, cassette): if logged: response = save(post) assert response == {'status': 'ok', 'post_id':", "from onegram import like, unlike from onegram import comment, uncomment from onegram import", "def test_comment(logged, post, cassette): text = 'awesome!' 
if logged: commentary = comment(text, post)", "text assert commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert", "'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response = save(post) assert", "from onegram import save, unsave def test_follow(logged, user, cassette): if logged: response =", "uncomment from onegram import save, unsave def test_follow(logged, user, cassette): if logged: response", "== {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def", "{'result': 'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert response == {'status':", "commentary['post_id'] == post['id'] response = uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']}", "import save, unsave def test_follow(logged, user, cassette): if logged: response = follow(user) assert", "{'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged,", "'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette):", "'post_id': post['id']} response = unlike(post) assert response == {'status': 'ok', 'post_id': post['id']} else:", "if logged: response = save(post) assert response == {'status': 'ok', 'post_id': post['id']} response", "unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with", "logged: commentary = comment(text, post) assert commentary['id'] assert commentary['text'] == text assert commentary['status']", "unsave def test_follow(logged, user, cassette): if logged: response = follow(user) assert response ==", "logged: response = follow(user) assert response == {'result': 'following', 'status': 'ok', 'user_id': user['id']}", "comment(text, post) assert commentary['id'] assert commentary['text'] == text assert commentary['status'] == 'ok' assert", "test_save(logged, post, cassette): if logged: response = save(post) assert response == {'status': 'ok',", "post, cassette): if logged: response = like(post) assert response == {'status': 'ok', 'post_id':", "import NotSupportedError from onegram import follow, unfollow from onegram import like, unlike from", "response == {'result': 'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert response", "import comment, uncomment from onegram import save, unsave def test_follow(logged, user, cassette): if", "assert response == {'status': 'ok', 'post_id': post['id']} response = unsave(post) assert response ==", "def test_follow(logged, user, cassette): if logged: response = follow(user) assert response == {'result':", "= unsave(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): save(post)", "like, unlike from onegram import comment, uncomment from onegram import save, unsave def", "with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged: response", "= 'awesome!' 
if logged: commentary = comment(text, post) assert commentary['id'] assert commentary['text'] ==", "'ok', 'post_id': post['id']} response = unlike(post) assert response == {'status': 'ok', 'post_id': post['id']}", "assert response == {'result': 'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert", "NotSupportedError from onegram import follow, unfollow from onegram import like, unlike from onegram", "response == {'status': 'ok', 'post_id': post['id']} response = unsave(post) assert response == {'status':", "else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged:", "pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def", "assert commentary['id'] assert commentary['text'] == text assert commentary['status'] == 'ok' assert commentary['post_id'] ==", "post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): fake_comment = {'id': '1', 'post_id':", "pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged: response = like(post) assert response", "'user_id': user['id']} response = unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']} else:", "unlike(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with", "user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if", "commentary['status'] == 'ok' assert commentary['post_id'] == post['id'] response = uncomment(commentary) assert response ==", "follow, unfollow from onegram import like, unlike from onegram import comment, uncomment from", "post['id']} response = unsave(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with", "= like(post) assert response == {'status': 'ok', 'post_id': post['id']} response = unlike(post) assert", "unlike from onegram import comment, uncomment from onegram import save, unsave def test_follow(logged,", "'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette):", "else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text =", "post['id']} response = unlike(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with", "unfollow(user) def test_like(logged, post, cassette): if logged: response = like(post) assert response ==", "if logged: response = like(post) assert response == {'status': 'ok', 'post_id': post['id']} response", "assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError):", "post) assert commentary['id'] assert commentary['text'] == text assert commentary['status'] == 'ok' assert commentary['post_id']", "response == {'status': 'ok', 'post_id': post['id']} response = unlike(post) assert response == {'status':", "import like, unlike from onegram import comment, uncomment from onegram import save, unsave", "'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text, post) with pytest.raises(NotSupportedError): 
fake_comment = {'id':", "= comment(text, post) assert commentary['id'] assert commentary['text'] == text assert commentary['status'] == 'ok'", "== {'status': 'ok', 'post_id': post['id']} response = unlike(post) assert response == {'status': 'ok',", "'status': 'ok', 'user_id': user['id']} response = unfollow(user) assert response == {'status': 'ok', 'user_id':", "from onegram import comment, uncomment from onegram import save, unsave def test_follow(logged, user,", "follow(user) assert response == {'result': 'following', 'status': 'ok', 'user_id': user['id']} response = unfollow(user)", "onegram.exceptions import NotSupportedError from onegram import follow, unfollow from onegram import like, unlike", "unfollow from onegram import like, unlike from onegram import comment, uncomment from onegram", "== {'status': 'ok', 'post_id': post['id']} response = unsave(post) assert response == {'status': 'ok',", "fake_comment = {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged:", "pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post, cassette): if logged: response =", "= follow(user) assert response == {'result': 'following', 'status': 'ok', 'user_id': user['id']} response =", "if logged: commentary = comment(text, post) assert commentary['id'] assert commentary['text'] == text assert", "= unfollow(user) assert response == {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user)", "response = unsave(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError):", "test_comment(logged, post, cassette): text = 'awesome!' if logged: commentary = comment(text, post) assert", "pytest from onegram.exceptions import NotSupportedError from onegram import follow, unfollow from onegram import", "response = uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError):", "'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged, post,", "if logged: response = follow(user) assert response == {'result': 'following', 'status': 'ok', 'user_id':", "'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): like(post) with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post,", "text = 'awesome!' if logged: commentary = comment(text, post) assert commentary['id'] assert commentary['text']", "= {'id': '1', 'post_id': '2'} uncomment(fake_comment) def test_save(logged, post, cassette): if logged: response", "{'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user) def test_like(logged,", "response == {'status': 'ok', 'user_id': user['id']} else: with pytest.raises(NotSupportedError): follow(user) with pytest.raises(NotSupportedError): unfollow(user)", "with pytest.raises(NotSupportedError): unlike(post) def test_comment(logged, post, cassette): text = 'awesome!' if logged: commentary", "= uncomment(commentary) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError): comment(text,", "response = unlike(post) assert response == {'status': 'ok', 'post_id': post['id']} else: with pytest.raises(NotSupportedError):" ]
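These tests rely on four pytest fixtures (logged, user, post, cassette) defined elsewhere in the suite. A minimal conftest.py sketch of what they could look like, with placeholder records and a stub cassette; everything here is an assumption about the surrounding suite, not code recovered from it:

    # conftest.py (hypothetical sketch)
    import pytest

    @pytest.fixture(params=[True, False], ids=['logged', 'anonymous'])
    def logged(request):
        # Run every test once logged in and once anonymous.
        return request.param

    @pytest.fixture
    def user():
        return {'id': '42', 'username': 'example'}  # placeholder record

    @pytest.fixture
    def post():
        return {'id': '1000000000000000000'}  # placeholder record

    @pytest.fixture
    def cassette():
        # Stand-in; the real fixture presumably replays recorded HTTP interactions.
        yield None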
[ "functionality so you'll # have to keep this somewhere safe and associated with", "And let's now try to create a transaction on that profile. resp =", "# Store the profile id somewhere so that we can later retrieve it.", "profile for one of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") #", "cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile for", "created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try", "profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to create a", "create a profile for one of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\",", "later retrieve it. # CIM doesn't have a listing or search functionality so", "we can later retrieve it. # CIM doesn't have a listing or search", "try to create a transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0", "have to keep this somewhere safe and associated with the user. profile_id =", "users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere", "account dashboard or through # the constructor of any of the API objects", "doesn't have a listing or search functionality so you'll # have to keep", "or search functionality so you'll # have to keep this somewhere safe and", "amount=50.0 ) pprint(resp) # We did what we needed, we can remove the", "= tree.customer_profile_id.text_ # Retrieve again the profile we just created using the profile_id", "cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what we needed, we can", "to create a transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 )", "import pprint # Note that you need to specify a delimiter and an", "a listing or search functionality so you'll # have to keep this somewhere", "to keep this somewhere safe and associated with the user. profile_id = tree.customer_profile_id.text_", "from pprint import pprint # Note that you need to specify a delimiter", "objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile", "this somewhere safe and associated with the user. profile_id = tree.customer_profile_id.text_ # Retrieve", "delimiter and an encapsulator # for your account (either in your account dashboard", "encapsulator # for your account (either in your account dashboard or through #", "Store the profile id somewhere so that we can later retrieve it. #", "so that we can later retrieve it. # CIM doesn't have a listing", "a profile for one of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\")", "# have to keep this somewhere safe and associated with the user. profile_id", "encapsulator=u\"\") # We create a profile for one of our users. 
tree =", "# Note that you need to specify a delimiter and an encapsulator #", "using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to", "tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere so", "# We create a profile for one of our users. tree = cim_api.create_profile(", "that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what", "a delimiter and an encapsulator # for your account (either in your account", "profile id somewhere so that we can later retrieve it. # CIM doesn't", "account (either in your account dashboard or through # the constructor of any", "We create a profile for one of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\",", "for one of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store", "safe and associated with the user. profile_id = tree.customer_profile_id.text_ # Retrieve again the", "search functionality so you'll # have to keep this somewhere safe and associated", "that we can later retrieve it. # CIM doesn't have a listing or", "# the constructor of any of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY',", "create a transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp)", "your account dashboard or through # the constructor of any of the API", "somewhere so that we can later retrieve it. # CIM doesn't have a", "CIM doesn't have a listing or search functionality so you'll # have to", ") pprint(resp) # We did what we needed, we can remove the profile", "card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere so that we can", "any of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") #", "can later retrieve it. # CIM doesn't have a listing or search functionality", "the profile we just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) #", "with the user. profile_id = tree.customer_profile_id.text_ # Retrieve again the profile we just", "now try to create a transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id,", "for your account (either in your account dashboard or through # the constructor", "# Retrieve again the profile we just created using the profile_id tree =", "pprint # Note that you need to specify a delimiter and an encapsulator", "tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to create a transaction", "of our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile", "(either in your account dashboard or through # the constructor of any of", "keep this somewhere safe and associated with the user. 
profile_id = tree.customer_profile_id.text_ #", "of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We", "to specify a delimiter and an encapsulator # for your account (either in", "# for your account (either in your account dashboard or through # the", "in your account dashboard or through # the constructor of any of the", "through # the constructor of any of the API objects cim_api = cim.Api(u'LOGIN',", "and an encapsulator # for your account (either in your account dashboard or", "just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now", "the constructor of any of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True,", "of any of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\")", "a transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) #", "id somewhere so that we can later retrieve it. # CIM doesn't have", "let's now try to create a transaction on that profile. resp = cim_api.create_profile_transaction(", "did what we needed, we can remove the profile for this example. pprint(cim_api.delete_profile(customer_profile_id=profile_id))", "need to specify a delimiter and an encapsulator # for your account (either", "cim from pprint import pprint # Note that you need to specify a", "the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create", "= cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere so that", "somewhere safe and associated with the user. profile_id = tree.customer_profile_id.text_ # Retrieve again", "on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did", "pprint(tree) # And let's now try to create a transaction on that profile.", "tree.customer_profile_id.text_ # Retrieve again the profile we just created using the profile_id tree", "pprint(resp) # We did what we needed, we can remove the profile for", "u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile for one of our", "dashboard or through # the constructor of any of the API objects cim_api", "transaction on that profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We", "again the profile we just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree)", "profile we just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And", "We did what we needed, we can remove the profile for this example.", "you'll # have to keep this somewhere safe and associated with the user.", "one of our users. 
tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the", "pprint import pprint # Note that you need to specify a delimiter and", "Note that you need to specify a delimiter and an encapsulator # for", "customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what we needed, we can remove", "listing or search functionality so you'll # have to keep this somewhere safe", "have a listing or search functionality so you'll # have to keep this", "profile. resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what we", "or through # the constructor of any of the API objects cim_api =", "API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a", "so you'll # have to keep this somewhere safe and associated with the", "our users. tree = cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id", "cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to create a transaction on that", "import cim from pprint import pprint # Note that you need to specify", "specify a delimiter and an encapsulator # for your account (either in your", "associated with the user. profile_id = tree.customer_profile_id.text_ # Retrieve again the profile we", "from authorize import cim from pprint import pprint # Note that you need", "user. profile_id = tree.customer_profile_id.text_ # Retrieve again the profile we just created using", "it. # CIM doesn't have a listing or search functionality so you'll #", "the profile id somewhere so that we can later retrieve it. # CIM", "is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile for one of our users.", "authorize import cim from pprint import pprint # Note that you need to", "that you need to specify a delimiter and an encapsulator # for your", "cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile for one of", "delimiter=u\",\", encapsulator=u\"\") # We create a profile for one of our users. tree", "# CIM doesn't have a listing or search functionality so you'll # have", "= cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to create a transaction on", "= cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\", encapsulator=u\"\") # We create a profile for one", "# We did what we needed, we can remove the profile for this", "cim_api.create_profile( card_number=u\"4111111111111111\", expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere so that we", "# And let's now try to create a transaction on that profile. resp", "resp = cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what we needed,", "you need to specify a delimiter and an encapsulator # for your account", "an encapsulator # for your account (either in your account dashboard or through", "and associated with the user. profile_id = tree.customer_profile_id.text_ # Retrieve again the profile", "the user. 
profile_id = tree.customer_profile_id.text_ # Retrieve again the profile we just created", "we just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's", "your account (either in your account dashboard or through # the constructor of", "retrieve it. # CIM doesn't have a listing or search functionality so you'll", "profile_id = tree.customer_profile_id.text_ # Retrieve again the profile we just created using the", "constructor of any of the API objects cim_api = cim.Api(u'LOGIN', u'TRANS_KEY', is_test=True, delimiter=u\",\",", "Retrieve again the profile we just created using the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id)", "= cim_api.create_profile_transaction( customer_profile_id=profile_id, amount=50.0 ) pprint(resp) # We did what we needed, we", "customer_id=u\"test_account\") # Store the profile id somewhere so that we can later retrieve", "the profile_id tree = cim_api.get_profile(customer_profile_id=profile_id) pprint(tree) # And let's now try to create", "<filename>doc/example.py<gh_stars>1-10 from authorize import cim from pprint import pprint # Note that you", "expiration_date=u\"2008-07\", customer_id=u\"test_account\") # Store the profile id somewhere so that we can later" ]
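Because create_profile provisions a real (test-mode) profile, it can be worth guaranteeing that the cleanup call runs even if an intermediate request fails. A variant of the same flow under the assumptions above (test credentials, cim_api already constructed); only calls already shown are used:

    tree = cim_api.create_profile(
        card_number=u"4111111111111111",
        expiration_date=u"2008-07",
        customer_id=u"test_account")
    profile_id = tree.customer_profile_id.text_
    try:
        pprint(cim_api.get_profile(customer_profile_id=profile_id))
        pprint(cim_api.create_profile_transaction(
            customer_profile_id=profile_id, amount=50.0))
    finally:
        # Always drop the throwaway profile, even if a call above raised.
        pprint(cim_api.delete_profile(customer_profile_id=profile_id))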
[ "in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index = 0", "b elif a + b < target_num: front_index += 1 else: # a", "elif a + b < target_num: front_index += 1 else: # a +", "e) = (-1, -1, -1) if triple: (c, d, e) = triple return", "while front_index != end_index: a = sorted_report[front_index] b = sorted_report[end_index] if a +", "List[int], target: int) -> Optional[Tuple[int, int, int]]: for i in sorted_report: pair =", "[int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]:", "= file.read() return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int)", "return a, b elif a + b < target_num: front_index += 1 else:", "else: # a + b > advent_2020 end_index -= 1 # given valid", "valid input, should never be reached. return None def find_triple(sorted_report: List[int], target: int)", "for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index", "report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b) = (-1, -1)", "List, Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort()", "-1) if pair: (a, b) = pair triple = find_triple(report_data, 2020) (c, d,", "1 else: # a + b > advent_2020 end_index -= 1 # given", "None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]: for i in", "front_index != end_index: a = sorted_report[front_index] b = sorted_report[end_index] if a + b", "for i in sorted_report: pair = find_pair(sorted_report, target-i) if pair: a, b =", "str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a,", "input_str = file.read() return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num:", "front_index += 1 else: # a + b > advent_2020 end_index -= 1", "triple: (c, d, e) = triple return a * b, c * d", "2020) (c, d, e) = (-1, -1, -1) if triple: (c, d, e)", "target_num: return a, b elif a + b < target_num: front_index += 1", "def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]: for i in sorted_report:", "= triple return a * b, c * d * e def parse_input(input_data_filepath:", "if pair: (a, b) = pair triple = find_triple(report_data, 2020) (c, d, e)", "a = sorted_report[front_index] b = sorted_report[end_index] if a + b == target_num: return", "1 # given valid input, should never be reached. return None def find_triple(sorted_report:", "input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index = 0 end_index", "(a, b) = pair triple = find_triple(report_data, 2020) (c, d, e) = (-1,", "int) -> Optional[Tuple[int, int, int]]: for i in sorted_report: pair = find_pair(sorted_report, target-i)", "+ b < target_num: front_index += 1 else: # a + b >", "reached. 
return None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]: for", "-1, -1) if triple: (c, d, e) = triple return a * b,", "b > advent_2020 end_index -= 1 # given valid input, should never be", "e) = triple return a * b, c * d * e def", "a + b < target_num: front_index += 1 else: # a + b", "end_index: a = sorted_report[front_index] b = sorted_report[end_index] if a + b == target_num:", "advent_2020 end_index -= 1 # given valid input, should never be reached. return", "pair triple = find_triple(report_data, 2020) (c, d, e) = (-1, -1, -1) if", "return a * b, c * d * e def parse_input(input_data_filepath: str) ->", "open(input_data_filepath) as file: input_str = file.read() return [int(x) for x in input_str.split(\"\\n\")] def", "List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report) -", "> advent_2020 end_index -= 1 # given valid input, should never be reached.", "= find_triple(report_data, 2020) (c, d, e) = (-1, -1, -1) if triple: (c,", "* e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file: input_str =", "= sorted_report[front_index] b = sorted_report[end_index] if a + b == target_num: return a,", "b) = (-1, -1) if pair: (a, b) = pair triple = find_triple(report_data,", "find_pair(report_data, 2020) (a, b) = (-1, -1) if pair: (a, b) = pair", "parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b) = (-1, -1) if pair:", "report_data.sort() pair = find_pair(report_data, 2020) (a, b) = (-1, -1) if pair: (a,", "sorted_report[front_index] b = sorted_report[end_index] if a + b == target_num: return a, b", "be reached. return None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]:", "-> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b)", "file: input_str = file.read() return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int],", "find_triple(report_data, 2020) (c, d, e) = (-1, -1, -1) if triple: (c, d,", "file.read() return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) ->", "sorted_report: pair = find_pair(sorted_report, target-i) if pair: a, b = pair return a,", "1 while front_index != end_index: a = sorted_report[front_index] b = sorted_report[end_index] if a", "= pair triple = find_triple(report_data, 2020) (c, d, e) = (-1, -1, -1)", "d, e) = (-1, -1, -1) if triple: (c, d, e) = triple", "with open(input_data_filepath) as file: input_str = file.read() return [int(x) for x in input_str.split(\"\\n\")]", "find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report)", "int]]: for i in sorted_report: pair = find_pair(sorted_report, target-i) if pair: a, b", "+ b == target_num: return a, b elif a + b < target_num:", "== target_num: return a, b elif a + b < target_num: front_index +=", "List[int]: with open(input_data_filepath) as file: input_str = file.read() return [int(x) for x in", "b, c * d * e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath)", "= len(sorted_report) - 1 while front_index != end_index: a = sorted_report[front_index] b =", "find_pair(sorted_report, target-i) if pair: a, b = pair return a, b, i return", "from typing import List, Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: 
report_data", "<reponame>SutterButter4/AdventOfCode from typing import List, Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]:", "- 1 while front_index != end_index: a = sorted_report[front_index] b = sorted_report[end_index] if", "# a + b > advent_2020 end_index -= 1 # given valid input,", "triple return a * b, c * d * e def parse_input(input_data_filepath: str)", "Optional[Tuple[int, int, int]]: for i in sorted_report: pair = find_pair(sorted_report, target-i) if pair:", "a + b > advent_2020 end_index -= 1 # given valid input, should", "* d * e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file:", "sorted_report[end_index] if a + b == target_num: return a, b elif a +", "target_num: front_index += 1 else: # a + b > advent_2020 end_index -=", "Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair =", "-> Optional[Tuple[int, int, int]]: for i in sorted_report: pair = find_pair(sorted_report, target-i) if", "(a, b) = (-1, -1) if pair: (a, b) = pair triple =", "+= 1 else: # a + b > advent_2020 end_index -= 1 #", "int, int]]: for i in sorted_report: pair = find_pair(sorted_report, target-i) if pair: a,", "-> Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report) - 1 while front_index", "# given valid input, should never be reached. return None def find_triple(sorted_report: List[int],", "-> List[int]: with open(input_data_filepath) as file: input_str = file.read() return [int(x) for x", "if a + b == target_num: return a, b elif a + b", "b = sorted_report[end_index] if a + b == target_num: return a, b elif", "= 0 end_index = len(sorted_report) - 1 while front_index != end_index: a =", "def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data,", "end_index -= 1 # given valid input, should never be reached. return None", "(-1, -1, -1) if triple: (c, d, e) = triple return a *", "= (-1, -1) if pair: (a, b) = pair triple = find_triple(report_data, 2020)", "= find_pair(sorted_report, target-i) if pair: a, b = pair return a, b, i", "if triple: (c, d, e) = triple return a * b, c *", "triple = find_triple(report_data, 2020) (c, d, e) = (-1, -1, -1) if triple:", "should never be reached. return None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int,", "input, should never be reached. return None def find_triple(sorted_report: List[int], target: int) ->", "import List, Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath)", "target_num: int) -> Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report) - 1", "pair = find_pair(sorted_report, target-i) if pair: a, b = pair return a, b,", "end_index = len(sorted_report) - 1 while front_index != end_index: a = sorted_report[front_index] b", "report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020)", "(c, d, e) = (-1, -1, -1) if triple: (c, d, e) =", "given valid input, should never be reached. 
return None def find_triple(sorted_report: List[int], target:", "d * e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file: input_str", "find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]: for i in sorted_report: pair", "target: int) -> Optional[Tuple[int, int, int]]: for i in sorted_report: pair = find_pair(sorted_report,", "str) -> List[int]: with open(input_data_filepath) as file: input_str = file.read() return [int(x) for", "0 end_index = len(sorted_report) - 1 while front_index != end_index: a = sorted_report[front_index]", "a, b elif a + b < target_num: front_index += 1 else: #", "-1) if triple: (c, d, e) = triple return a * b, c", "def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file: input_str = file.read() return", "i in sorted_report: pair = find_pair(sorted_report, target-i) if pair: a, b = pair", "typing import List, Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data =", "= parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b) = (-1, -1) if", "return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int,", "a * b, c * d * e def parse_input(input_data_filepath: str) -> List[int]:", "target-i) if pair: a, b = pair return a, b, i return None", "int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b) = (-1,", "2020) (a, b) = (-1, -1) if pair: (a, b) = pair triple", "= sorted_report[end_index] if a + b == target_num: return a, b elif a", "= find_pair(report_data, 2020) (a, b) = (-1, -1) if pair: (a, b) =", "Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair = find_pair(report_data, 2020) (a, b) =", "return None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int, int]]: for i", "len(sorted_report) - 1 while front_index != end_index: a = sorted_report[front_index] b = sorted_report[end_index]", "b) = pair triple = find_triple(report_data, 2020) (c, d, e) = (-1, -1,", "(c, d, e) = triple return a * b, c * d *", "Tuple, Optional def report_repair(input_data_filepath: str) -> Tuple[int, int]: report_data = parse_input(input_data_filepath) report_data.sort() pair", "!= end_index: a = sorted_report[front_index] b = sorted_report[end_index] if a + b ==", "+ b > advent_2020 end_index -= 1 # given valid input, should never", "= (-1, -1, -1) if triple: (c, d, e) = triple return a", "int) -> Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report) - 1 while", "(-1, -1) if pair: (a, b) = pair triple = find_triple(report_data, 2020) (c,", "Optional[Tuple[int, int]]: front_index = 0 end_index = len(sorted_report) - 1 while front_index !=", "def find_pair(sorted_report: List[int], target_num: int) -> Optional[Tuple[int, int]]: front_index = 0 end_index =", "parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file: input_str = file.read() return [int(x)", "b < target_num: front_index += 1 else: # a + b > advent_2020", "a + b == target_num: return a, b elif a + b <", "e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as file: input_str = file.read()", "pair: (a, b) = pair triple = find_triple(report_data, 2020) (c, d, e) =", "x in input_str.split(\"\\n\")] def find_pair(sorted_report: List[int], target_num: int) -> 
Optional[Tuple[int, int]]: front_index =", "< target_num: front_index += 1 else: # a + b > advent_2020 end_index", "d, e) = triple return a * b, c * d * e", "c * d * e def parse_input(input_data_filepath: str) -> List[int]: with open(input_data_filepath) as", "in sorted_report: pair = find_pair(sorted_report, target-i) if pair: a, b = pair return", "* b, c * d * e def parse_input(input_data_filepath: str) -> List[int]: with", "front_index = 0 end_index = len(sorted_report) - 1 while front_index != end_index: a", "pair = find_pair(report_data, 2020) (a, b) = (-1, -1) if pair: (a, b)", "int]]: front_index = 0 end_index = len(sorted_report) - 1 while front_index != end_index:", "as file: input_str = file.read() return [int(x) for x in input_str.split(\"\\n\")] def find_pair(sorted_report:", "b == target_num: return a, b elif a + b < target_num: front_index", "never be reached. return None def find_triple(sorted_report: List[int], target: int) -> Optional[Tuple[int, int,", "-= 1 # given valid input, should never be reached. return None def" ]
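# Usage sketch for the solution above. The input file name "day1_input.txt"
# is a hypothetical example; the real path is not given in the source.
# parse_input expects one integer per line with no trailing blank line.
if __name__ == "__main__":
    pair_product, triple_product = report_repair("day1_input.txt")
    print("Part 1 (pair summing to 2020):", pair_product)
    print("Part 2 (triple summing to 2020):", triple_product)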
[ "from %(f)d to %(t)d in %(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't':", "/ check_interval_sec)) for trial in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if", "SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\", "aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec is", "{'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory',", "import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int:", "int, timeout_sec: int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for", "check_interval_sec)) for trial in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size", "'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec:", "get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes", "= None, aws_region_name: str = None): from envconfig.env import env if aws_asg_name is", "for trial in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count", "join Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20", "bring %(d)d nodes to Spark cluster in %(s)d seconds, current cluster size: %(c)d'", "int, check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name)", "%(d)d nodes to Spark cluster in %(s)d seconds, current cluster size: %(c)d' %", "'s': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1", "asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec:", "datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d ...' 
% {'g':", "adjust the capacity of asg \"%(g)s\" from %(f)d to %(t)d in %(s)d seconds'", "= env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s'", "current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec))", "import os import time import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str,", "and Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str", "if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook", "standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None): from envconfig.env", "PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from envconfig.env import env", "%H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d ...' % {'g': aws_asg_name,", "region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH']", "max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return", "get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() ->", "int = None, aws_asg_name: str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int", "return logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d seconds, current", "int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity =", "* 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by') def", "noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from envconfig.env import", "boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if inst['LifecycleState']", "= None, aws_asg_name: str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int =", "in %(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def", "\\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size,", "else: return logging.warning('Failed to adjust the capacity of asg \"%(g)s\" from %(f)d to", "check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0,", 
"10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial + 1):", "in %(s)d seconds, current cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c':", "timeout_sec: int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial", "inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity:", "1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to", ".master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions)", "int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name,", "str = None): from envconfig.env import env if aws_asg_name is None: aws_asg_name =", "%(s)d seconds, current cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)})", "import env if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None:", "env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str", "get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]", "'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session()", "\\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10)", "adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial", "import time import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str)", "return logging.warning('Failed to adjust the capacity of asg \"%(g)s\" from %(f)d to %(t)d", "= get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d", "int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark)", "is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS", "logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to", "asg \"%(g)s\" from %(f)d to %(t)d in %(s)d seconds' % {'g': asg_name, 'f':", "import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name)", "for inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, 
region_name: str,", "def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession:", "aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s", "aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark cluster", "client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name:", "desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3):", "\\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\", "desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec))", "is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting", "ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in", "None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down", "down AWS autoscaling group \"%(g)s\" by adjusting capacity to 0' % {'g': aws_asg_name})", "None, aws_region_name: str = None): from envconfig.env import env if aws_asg_name is None:", "asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService'])", "aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting capacity to", "if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name =", "cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark", "max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else:", "...' 
% {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to", "env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d", "None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling", "max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial + 1): inservice_instance_count", "math import os import time import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name:", "region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity -", "desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling',", "current cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark):", "from envconfig.env import env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size", "= env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling", "None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting capacity", "= ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial", "None, aws_asg_name: str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None):", "region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity", "trial in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size:", "to Spark cluster in %(s)d seconds, current cluster size: %(c)d' % {'d': desired_cluster_size,", "= None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env if aws_cluster_size is", "inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to", "= env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',", "None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env", "logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d seconds, current cluster", "= int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials + 1): current_size =", "{'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark", "capacity to %(c)d ...' 
% {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting", "Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark,", "adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\", "def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int", "int = 15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity,", "{'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size()", "time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity of asg \"%(g)s\" from %(f)d", ".appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory',", "autoscaling group \"%(g)s\" capacity to %(c)d ...' % {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name,", "15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity)", "None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster", "aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name", "to adjust the capacity of asg \"%(g)s\" from %(f)d to %(t)d in %(s)d", "'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark cluster ...')", "str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for", "%(t)d in %(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec})", "%(f)d to %(t)d in %(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity,", "to %(c)d ...' 
% {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for", "boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity", "def setup_spark_session() -> SparkSession: from envconfig.env import env asg_name = env.aws_asg_name region_name =", "math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial", "str, region_name: str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30):", "for trial in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size !=", "int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials", "envconfig.env import env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is", "from envconfig.env import env if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name", "- current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec /", "{'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec:", "str = None, aws_region_name: str = None): from envconfig.env import env if aws_asg_name", "Spark cluster in %(s)d seconds, current cluster size: %(c)d' % {'d': desired_cluster_size, 's':", "max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials + 1): current_size", "def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None): from envconfig.env import env", "* math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0,", "1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed", "pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client = boto3.client('autoscaling',", "current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes to Spark", "= None): from envconfig.env import env if aws_asg_name is None: aws_asg_name = env.aws_asg_name", "env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' %", "region_name: str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30): aws_client", "env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder", "aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS", "autoscaling group \"%(g)s\" by adjusting capacity to 0' % {'g': aws_asg_name}) adjust_ec2_asg(aws_asg_name, aws_region_name,", "seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def 
wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size:", "aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity", "current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int", "from envconfig.env import env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name", "3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials + 1):", "DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec", "setup_spark_session() -> SparkSession: from envconfig.env import env asg_name = env.aws_asg_name region_name = env.aws_region_name", "region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10)", "wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec /", "= None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import", "if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')", "= 15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0,", "%(c)d ...' % {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers", "30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name)", "seconds, current cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def", "os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity", "envconfig.env import env if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is", "/ check_interval_sec)) for trial in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name)", "%(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember", "%(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d ...' 
%", "range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else:", "if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes to", "env if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name", "aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting", "SparkSession: from envconfig.env import env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size =", "\\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions',", "!= desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity of asg \"%(g)s\"", "!= desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes to Spark cluster", "import env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name,", "aws_cluster_size) logging.info('Waiting for workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec is None:", "env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name", "import env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is None:", "= aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing", "is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO,", "str, region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return", "* 10) return spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str =", "\\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size: int", "# noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from envconfig.env", ".config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return", "aws_asg_name: str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None): from", "= env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None:", "env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python", "inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity of asg", "in asg['Instances'] if inst['LifecycleState'] == 'InService']) def 
adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int,", "= None): from envconfig.env import env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size", "if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity of", "asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON']", "% {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\", "envconfig.env import env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name,", "env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark", "boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client", "desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes to Spark cluster in", "cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size,", "prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str = None,", "str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env", "= client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def", "are standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None): from", "+ 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return", "if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int", "spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from envconfig.env import env asg_name =", "region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1", "env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath", "os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e':", "aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by", "format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d", "%(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d ...'", "env.spark_sql_shuffle_partitions) 
\\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size *", "is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark", "env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size)", "= get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity)", "client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances']", "% {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join", "logging.warning('Failed to adjust the capacity of asg \"%(g)s\" from %(f)d to %(t)d in", "desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() -", "20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by') def shutdown_notebook(aws_asg_name:", "in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity:", "aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity)", "= env.aws_asg_name region_name = env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] =", "/ 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial +", "timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3): max_trials =", "time import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) ->", "abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec", "setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env if aws_cluster_size is None: aws_cluster_size", "aws_region_name: str = None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env if", "None): from envconfig.env import env if aws_asg_name is None: aws_asg_name = env.aws_asg_name if", "setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are", "logging.info('Notebook and Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name:", "current_capacity = get_asg_inservice_instance_count(asg_name, region_name) 
adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec *", "% {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int,", "import logging import math import os import time import boto3 from pyspark.sql import", "nodes to Spark cluster in %(s)d seconds, current cluster size: %(c)d' % {'d':", "aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name", "str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30): aws_client =", "None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s", "= boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity =", "os import time import boto3 from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name:", "setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str = None,", "- 1 def setup_spark_session() -> SparkSession: from envconfig.env import env asg_name = env.aws_asg_name", "return spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name:", "SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg", "time.sleep(check_interval_sec) else: return logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d", "workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size", "= int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trial + 1): inservice_instance_count =", "= get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust", "Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str =", "timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial = int(math.ceil(timeout_sec / check_interval_sec)) for", "aws_region_name = env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group", "'s': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3): max_trials", "AWS autoscaling group \"%(g)s\" capacity to %(c)d ...' 
% {'g': aws_asg_name, 'c': aws_cluster_size})", "from pyspark.sql import SparkSession def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client =", "cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\", "aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name is", "int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in", "wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size: int = None,", "by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None): from envconfig.env import", "region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] ==", "def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec", "env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity", "timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def", "aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark cluster ...') if", "if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\"", "def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int: client = boto3.client('autoscaling', region_name=region_name) asg =", "10) return spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None,", "-> int: client = boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst", "env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size:", "group \"%(g)s\" by adjusting capacity to 0' % {'g': aws_asg_name}) adjust_ec2_asg(aws_asg_name, aws_region_name, 0)", "adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec: int =", "of asg \"%(g)s\" from %(f)d to %(t)d in %(s)d seconds' % {'g': asg_name,", "env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name", "ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity", "= env.aws_region_name logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logging.info('Adjusting AWS autoscaling group \"%(g)s\"", "None: 
aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name if aws_region_name", "spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri',", "check_interval_sec)) for trial in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if", "is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name = env.aws_asg_name if", "== 'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int = 15,", "else: return logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d seconds,", "= boto3.client('autoscaling', region_name=region_name) asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0] return sum([1 for inst in asg['Instances'] if", "adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size) logging.info('Waiting for workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec", "None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env if aws_cluster_size is None:", "\"%(g)s\" from %(f)d to %(t)d in %(s)d seconds' % {'g': asg_name, 'f': current_capacity,", "get_spark_worker_node_count(spark): # noinspection PyProtectedMember return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from", "inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int =", "region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master)", "get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the", "import math import os import time import boto3 from pyspark.sql import SparkSession def", "get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) /", "cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): #", "trial in range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count !=", "% {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection PyProtectedMember return", "check_interval_sec: int = 15, ten_inst_timeout_sec: int = 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name,", "'t': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int, check_interval_sec: int =", "%(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark,", "1 def setup_spark_session() -> SparkSession: from envconfig.env import env asg_name = env.aws_asg_name region_name", "+ 1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: 
return logging.warning('Failed", "return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str,", "wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str", "cluster are standing by') def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None):", "return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1 def setup_spark_session() -> SparkSession: from envconfig.env import env asg_name", "= env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env})", "to join Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size *", "desired_capacity: time.sleep(check_interval_sec) else: return logging.warning('Failed to adjust the capacity of asg \"%(g)s\" from", "env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark,", "setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and", "size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)}) def get_spark_worker_node_count(spark): # noinspection", "is None: aws_asg_name = env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting", "def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str =", "AWS autoscaling group \"%(g)s\" by adjusting capacity to 0' % {'g': aws_asg_name}) adjust_ec2_asg(aws_asg_name,", "group \"%(g)s\" capacity to %(c)d ...' 
% {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name,", "logging import math import os import time import boto3 from pyspark.sql import SparkSession", "= abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10) max_trial =", "\\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def", "= env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] = env.spark_pythonpath spark =", ".config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate()", "...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec = aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec)", "= 30): aws_client = boto3.client('autoscaling', region_name=region_name) aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desired_capacity, MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name,", "MinSize=0, MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec =", "\"%(g)s\" capacity to %(c)d ...' % {'g': aws_asg_name, 'c': aws_cluster_size}) adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size)", "aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str = None, setup_spark_cluster_timeout_sec:", "env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting capacity to 0' %", "to %(t)d in %(s)d seconds' % {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's':", "range(0, max_trial + 1): inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name) if inservice_instance_count != desired_capacity: time.sleep(check_interval_sec)", "'InService']) def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int, check_interval_sec: int = 15, ten_inst_timeout_sec:", "int = None): from envconfig.env import env if aws_cluster_size is None: aws_cluster_size =", "in range(0, max_trials + 1): current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec)", "= SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri)", "env.spark_pythonpath spark = SparkSession.builder \\ .master(env.spark_master) \\ .appName('Trinity %(e)s' % {'e': env.env}) \\", "env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name:", "int, check_interval_sec: int = 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in", "env.aws_asg_name if aws_region_name is None: aws_region_name = env.aws_region_name logging.info('Shutting down AWS autoscaling group", "str = None, setup_spark_cluster_timeout_sec: int = None): from envconfig.env import env if aws_cluster_size", "%(e)s' % {'e': env.env}) \\ .config('spark.executor.uri', 
env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory)", "for workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec =", "logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting capacity to 0' % {'g':", "cluster in %(s)d seconds, current cluster size: %(c)d' % {'d': desired_cluster_size, 's': timeout_sec,", ".config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size", "aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by') def shutdown_notebook(aws_asg_name: str =", "= env.aws_region_name cluster_size = env.aws_cluster_size adjust_ec2_asg(asg_name, region_name, cluster_size) os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python os.environ['PYTHONPATH'] =", "if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if aws_asg_name is None: aws_asg_name =", "= 3): max_trials = int(math.ceil(timeout_sec / check_interval_sec)) for trial in range(0, max_trials +", "asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec}) def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int, timeout_sec: int,", "logging.info('Waiting for workers to join Spark cluster ...') if setup_spark_cluster_timeout_sec is None: setup_spark_cluster_timeout_sec", "logging.info('Adjusting AWS autoscaling group \"%(g)s\" capacity to %(c)d ...' % {'g': aws_asg_name, 'c':", "\\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory) \\", "env.env}) \\ .config('spark.executor.uri', env.spark_executor_uri) \\ .config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \\ .config('spark.driver.memory', env.spark_driver_memory) \\ .config('spark.executor.memory', env.spark_executor_memory)", "current_size = get_spark_worker_node_count(spark) if current_size != desired_cluster_size: time.sleep(check_interval_sec) else: return logging.warning('Failed to bring", "= env.aws_region_name logging.info('Shutting down AWS autoscaling group \"%(g)s\" by adjusting capacity to 0'", "shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None): from envconfig.env import env if", ".config('spark.executor.memory', env.spark_executor_memory) \\ .getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark,", "-> SparkSession: from envconfig.env import env asg_name = env.aws_asg_name region_name = env.aws_region_name cluster_size", "capacity of asg \"%(g)s\" from %(f)d to %(t)d in %(s)d seconds' % {'g':", ".getOrCreate() wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10) return spark def prep_notebook(spark, aws_cluster_size: int =", "None): from envconfig.env import env if aws_cluster_size is None: aws_cluster_size = env.aws_cluster_size if", "to bring %(d)d nodes to Spark cluster in %(s)d seconds, current cluster size:", "aws_region_name: str = None): from envconfig.env import env if 
aws_asg_name is None: aws_asg_name", "spark def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str", "MaxSize=desired_capacity) current_capacity = get_asg_inservice_instance_count(asg_name, region_name) adjust_capacity = abs(desired_capacity - current_capacity) timeout_sec = ten_inst_timeout_sec", "aws_cluster_size * 20 wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec) logging.info('Notebook and Spark cluster are standing by')", "the capacity of asg \"%(g)s\" from %(f)d to %(t)d in %(s)d seconds' %", "sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService']) def adjust_ec2_asg(asg_name: str, region_name:" ]
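# Usage sketch for the helpers above, assuming an envconfig.env module that
# defines the aws_* and spark_* settings they read. Session setup scales the
# autoscaling group up and blocks until the workers register; shutting down
# returns the group to zero so no instances keep running.
spark = setup_spark_session()
try:
    spark.range(1000).count()  # placeholder workload
finally:
    shutdown_notebook()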
[ "clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and re_password and password", "self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name): return '' def", "self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories']", "= forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User fields = ('username',", "adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query =", "RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } re_password = forms.CharField(", "**kwargs) class Meta: model = User fields = ('last_name', 'first_name', 'avatar') labels =", "clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields =", "if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self):", "username=username, password=password) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data", "field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password", "Q() for category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self):", "is not None: self.user_cache = authenticate( username=username, password=password) if self.user_cache is None: raise", "__init__(self, *args, **kwargs): self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name):", "self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>')", "self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if", "if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password", "{ 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args,", "Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta):", "def clean(self): username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not None", "} def __init__(self, *args, **kwargs): 
self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self,", "_(\"Mật khẩu cũ bạn vừa nhập không đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"),", "raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self, commit=True): user = super().save(commit=False)", "self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages =", "= ('last_name', 'first_name', 'avatar') labels = { 'last_name': 'Họ', 'first_name': 'Tên', 'avatar': 'Avatar',", "def get_initial_for_field(self, field, field_name): return '' def clean(self): username = self.cleaned_data.get('username') password =", "cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories',", "} class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user, *args,", "import gettext, gettext_lazy as _ from django.contrib.auth import authenticate, forms as auth_forms from", "from django import forms from .models import User from news.models import Category from", "'first_name', 'last_name', 'email', 'password',) labels = { 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên',", "_('Mật khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta:", "'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user,", "widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs):", "không đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache = None return super().__init__(*args, **kwargs)", "fields = ('last_name', 'first_name', 'avatar') labels = { 'last_name': 'Họ', 'first_name': 'Tên', 'avatar':", "username = forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), )", "field, field_name): return '' def clean(self): username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if", "self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } re_password", "re_password def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user", "{ 'password_mismatch': _('Mật khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, )", "self.cleaned_data.get('re_password') if password and re_password and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch'", "return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and", "self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = 
self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data", "def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields", "error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>',", "fields = ('following_categories', 'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm):", "'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập không đúng.\"), } old_password = forms.CharField(", "not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password =", "model = User fields = ('username', 'first_name', 'last_name', 'email', 'password',) labels = {", "không khớp.'), } new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2", "code='invalid_login', ) return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages = {", "forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): self.user = user return", ") return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class", "django.db.models import Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password = forms.CharField(", ") class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa", "forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User fields = ('username', 'first_name',", "get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password')", "field, field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if", "from django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth import authenticate, forms as", "('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None)", "!= re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self, commit=True): user", "= Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class", "forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages", "label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = {", 
"forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save()", ") return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch':", "self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', )", "class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() }", "('last_name', 'first_name', 'avatar') labels = { 'last_name': 'Họ', 'first_name': 'Tên', 'avatar': 'Avatar', }", "return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) self.following_categories =", "super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model = User", "super().__init__(*args, **kwargs) class Meta: model = User fields = ('last_name', 'first_name', 'avatar') labels", "forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages =", "= User fields = ('username', 'first_name', 'last_name', 'email', 'password',) labels = { 'email':", "= forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages", "forms as auth_forms from django.db.models import Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'),", "as _ from django.contrib.auth import authenticate, forms as auth_forms from django.db.models import Q", "'password_mismatch': _('Mật khẩu không khớp.'), } new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),", "from django.db.models import Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password =", "return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật", "widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = { 'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"),", "self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args,", "following_query = Q() for category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query)", "re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self, commit=True): user =", "not None: self.user_cache = authenticate( username=username, password=password) if self.user_cache is None: raise forms.ValidationError(", "widget=forms.PasswordInput(attrs={'autocomplete': 
'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete':", "code='password_mismatch' ) return re_password def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit:", "Meta: model = User fields = ('username', 'first_name', 'last_name', 'email', 'password',) labels =", "user return super().__init__(*args, **kwargs) class Meta: model = User fields = ('last_name', 'first_name',", "authenticate( username=username, password=password) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return", "user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật", "self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model = User fields =", "} new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField(", "fields = ('username', 'first_name', 'last_name', 'email', 'password',) labels = { 'email': 'Email', 'password':", "|= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories'))", "widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self,", "= self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def", "= self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not None and password is", ") new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class", "def __init__(self, user, *args, **kwargs): self.user = user return super().__init__(*args, **kwargs) class Meta:", "username is not None and password is not None: self.user_cache = authenticate( username=username,", "user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model = User fields = ('following_categories', )", "class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập", "khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model", "'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if", "for category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data", "'Họ', } def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password')", "strip=False, 
widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order = ['old_password', 'new_password1', 'new_<PASSWORD>'] def", "news.models import Category from django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth import", "re_password = self.cleaned_data.get('re_password') if password and re_password and password != re_password: raise forms.ValidationError(", ") error_messages = { 'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"), } def", "= forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại", "TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset =", "fields = ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories", "new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm):", "field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError(", "Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class", "return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), }", "SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } new_password1 = forms.CharField(", "'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache", "self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs)", "User fields = ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm):", "clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user,", "self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self):", "and re_password and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password", "= self.following_categories class Meta: model = User fields = ('following_categories', ) widgets =", "mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect':", "old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password", 
"category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data =", "widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ", "user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } new_password1", "def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password =", "label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = {", "{ 'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"), } def __init__(self, *args, **kwargs):", "} re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User fields", "'first_name': 'Tên', 'last_name': 'Họ', } def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self):", "return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'],", "'current-password'}), ) error_messages = { 'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"), }", "and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self,", "*args, **kwargs) following_query = Q() for category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset", "__init__(self, user, *args, **kwargs): self.user = user return super().__init__(*args, **kwargs) class Meta: model", "self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self): return", "**kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model = User fields", "class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs)", "raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password)", "= ['old_password', 'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return '' def clean_old_password(self): old_password", "self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all()", "return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', ) widgets = { 'adding_categories':", "= { 'invalid_login': _(\"Username hoặc mật khẩu không đúng.\"), } def __init__(self, *args,", "} def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password = self.cleaned_data.get('password') re_password", "from 
.models import User from news.models import Category from django.utils.translation import gettext, gettext_lazy", "self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) self.following_categories", "TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query", "_('Mật khẩu không khớp.'), } new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False,", "password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and re_password and password !=", "label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order = ['old_password', 'new_password1', 'new_<PASSWORD>']", "class Meta: model = User fields = ('username', 'first_name', 'last_name', 'email', 'password',) labels", "is not None and password is not None: self.user_cache = authenticate( username=username, password=password)", "khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User", "**SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập không đúng.\"), } old_password =", "'autofocus': True}), ) field_order = ['old_password', 'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return", "model = User fields = ('last_name', 'first_name', 'avatar') labels = { 'last_name': 'Họ',", "= forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query = Q()", "= super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = {", "def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu không", "class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } new_password1 =", "= { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): self.user", "authenticate, forms as auth_forms from django.db.models import Q class LoginForm(forms.Form): username = forms.CharField(", "import authenticate, forms as auth_forms from django.db.models import Q class LoginForm(forms.Form): username =", "def get_initial_for_field(self, field, field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not", "new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args,", "return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm):", "in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data", "None return 
super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name): return '' def clean(self): username", "django import forms from .models import User from news.models import Category from django.utils.translation", "return self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), }", "user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages =", ") return re_password def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save()", "user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'),", "= user return super().__init__(*args, **kwargs) class Meta: model = User fields = ('last_name',", "self.error_messages['password_incorrect'], code='password_incorrect', ) return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return", "_ from django.contrib.auth import authenticate, forms as auth_forms from django.db.models import Q class", "def __init__(self, *args, **kwargs): self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field,", "nhập không đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus':", "def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', )", "None and password is not None: self.user_cache = authenticate( username=username, password=password) if self.user_cache", "def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and re_password and", "= forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = { 'invalid_login': _(\"Username hoặc", "= { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user,", "'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): self.user = user", "clean(self): username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not None and", "đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), )", "raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm):", "= None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name): return '' def clean(self):", "forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages =", "super().__init__(*args, 
**kwargs) def get_initial_for_field(self, field, field_name): return '' def clean(self): username = self.cleaned_data.get('username')", "None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def get_user(self): return self.user_cache class", ") widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args,", "'password',) labels = { 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ', }", "'last_name': 'Họ', } def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password =", "khẩu không đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache = None return super().__init__(*args,", "self.cleaned_data def get_user(self): return self.user_cache class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu", "old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order =", "error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập không đúng.\"),", "user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query = Q() for category in self.following_categories:", "widget=forms.PasswordInput, ) class Meta: model = User fields = ('username', 'first_name', 'last_name', 'email',", "('following_categories', 'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def __init__(self,", "'' def clean_re_password(self): password = self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and re_password", "save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm):", "strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu", "def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class", "khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False,", "= self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories',", "User from news.models import Category from django.utils.translation import gettext, gettext_lazy as _ from", "_(\"Username hoặc mật khẩu không đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache =", "strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), )", "= ('following_categories', 'adding_categories', ) widgets = { 'adding_categories': forms.CheckboxSelectMultiple() } class UserUpdateForm(forms.ModelForm): def", "cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields 
= ('following_categories', 'adding_categories', ) widgets", "password and re_password and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return", "not None and password is not None: self.user_cache = authenticate( username=username, password=password) if", "} old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order", "= forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order = ['old_password',", "'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password')", "password is not None: self.user_cache = authenticate( username=username, password=password) if self.user_cache is None:", "password=password) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', ) return self.cleaned_data def", "self.cleaned_data.get('password') re_password = self.cleaned_data.get('re_password') if password and re_password and password != re_password: raise", "= self.cleaned_data.get('re_password') if password and re_password and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'],", "không đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}),", "password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def save(self, commit=True):", "= { 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ', } def get_initial_for_field(self,", "= authenticate( username=username, password=password) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', )", "re_password and password != re_password: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return re_password def", "if username is not None and password is not None: self.user_cache = authenticate(", "following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] =", "**kwargs) def get_initial_for_field(self, field, field_name): return '' def clean(self): username = self.cleaned_data.get('username') password", "user, *args, **kwargs): self.user = user return super().__init__(*args, **kwargs) class Meta: model =", ") class Meta: model = User fields = ('username', 'first_name', 'last_name', 'email', 'password',)", "self.following_categories class Meta: model = User fields = ('following_categories', ) widgets = {", "khớp.'), } new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 =", "forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs): super().__init__(user,", "forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch' ) return 
re_password def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"])", "= User fields = ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class", "error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } new_password1 = forms.CharField( label=_(\"Mật", "class RegisterForm(forms.ModelForm): error_messages = { 'password_mismatch': _('Mật khẩu không khớp.'), } re_password =", "class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): self.user = user return super().__init__(*args, **kwargs)", "= { 'password_mismatch': _('Mật khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput,", "super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch':", "Category from django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth import authenticate, forms", "class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete':", "không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model =", "'last_name', 'email', 'password',) labels = { 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name':", "field, field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise", "= self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', ) widgets =", "*args, **kwargs): self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name): return", "lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages,", ") password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = { 'invalid_login':", "__init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query = Q() for category in", "khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật", "field_name): return '' def clean(self): username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username", "forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật", "new_password1 = forms.CharField( label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập", "**kwargs): super().__init__(user, *args, **kwargs) following_query = Q() for category in self.following_categories: following_query |=", "get_initial_for_field(self, field, field_name): return '' def clean(self): username = 
self.cleaned_data.get('username') password = self.cleaned_data.get('password')", "field_order = ['old_password', 'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return '' def clean_old_password(self):", "def __init__(self, user, *args, **kwargs): super().__init__(user, *args, **kwargs) following_query = Q() for category", "'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories = forms.ModelMultipleChoiceField(queryset=None) def __init__(self, user, *args, **kwargs):", "= ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), } class TopicAddForm(TopicOrganizeForm): adding_categories =", "= self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs):", "('username', 'first_name', 'last_name', 'email', 'password',) labels = { 'email': 'Email', 'password': '<PASSWORD>', 'first_name':", "import forms from .models import User from news.models import Category from django.utils.translation import", "'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ', } def get_initial_for_field(self, field, field_name):", "= User fields = ('last_name', 'first_name', 'avatar') labels = { 'last_name': 'Họ', 'first_name':", "re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User fields =", "'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn", "**kwargs) following_query = Q() for category in self.following_categories: following_query |= Q(pk=category.pk) self.fields['adding_categories'].queryset =", "code='password_incorrect', ) return old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data", "super().__init__(user, *args, **kwargs) following_query = Q() for category in self.following_categories: following_query |= Q(pk=category.pk)", "forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order = ['old_password', 'new_password1',", "*args, **kwargs): super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model", "**kwargs): self.user_cache = None return super().__init__(*args, **kwargs) def get_initial_for_field(self, field, field_name): return ''", "django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth import authenticate, forms as auth_forms", "commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages", "'' def clean(self): username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not", "import User from news.models import Category from django.utils.translation import gettext, gettext_lazy as _", "mật khẩu không đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache = None return", "return re_password def save(self, commit=True): user = super().save(commit=False) 
user.set_password(self.cleaned_data[\"password\"]) if commit: user.save() return", "'Tên', 'last_name': 'Họ', } def get_initial_for_field(self, field, field_name): return '' def clean_re_password(self): password", "and password is not None: self.user_cache = authenticate( username=username, password=password) if self.user_cache is", "True}), ) field_order = ['old_password', 'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self, field, field_name): return ''", "= user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model = User fields = ('following_categories',", "django.contrib.auth import authenticate, forms as auth_forms from django.db.models import Q class LoginForm(forms.Form): username", ".models import User from news.models import Category from django.utils.translation import gettext, gettext_lazy as", "'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ', } def get_initial_for_field(self, field, field_name): return", "**kwargs): super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories class Meta: model =", "strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = { 'invalid_login': _(\"Username hoặc mật khẩu không", "widget=forms.PasswordInput( attrs={'autocomplete': 'current-password', 'autofocus': True}), ) field_order = ['old_password', 'new_password1', 'new_<PASSWORD>'] def get_initial_for_field(self,", "class Meta: model = User fields = ('following_categories', ) widgets = { 'following_categories':", "LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}),", "{ **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập không đúng.\"), } old_password", "đúng.\"), } def __init__(self, *args, **kwargs): self.user_cache = None return super().__init__(*args, **kwargs) def", "} class UserUpdateForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): self.user = user return super().__init__(*args,", "old_password def clean(self): new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>') self.user.set_password(new_password) self.user.save() return self.cleaned_data class TopicOrganizeForm(forms.ModelForm): def", "= forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), ) class PasswordChangeForm(SetPasswordForm): error_messages", "None: self.user_cache = authenticate( username=username, password=password) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'],", "vừa nhập không đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False, widget=forms.PasswordInput( attrs={'autocomplete': 'current-password',", "self.fields['following_categories'].queryset = self.following_categories class Meta: model = User fields = ('following_categories', ) widgets", "clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password): raise forms.ValidationError( self.error_messages['password_incorrect'], code='password_incorrect', ) return", "class TopicOrganizeForm(forms.ModelForm): def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) 
self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset", "Q(pk=category.pk) self.fields['adding_categories'].queryset = Category.objects.exclude(following_query) def clean(self): cleaned_data = self.cleaned_data cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories')) return", "self.user = user return super().__init__(*args, **kwargs) class Meta: model = User fields =", "get_initial_for_field(self, field, field_name): return '' def clean_old_password(self): old_password = self.cleaned_data.get('old_password') if not self.user.check_password(old_password):", "self.following_categories.union(cleaned_data.get('adding_categories')) return cleaned_data class Meta(TopicOrganizeForm.Meta): fields = ('following_categories', 'adding_categories', ) widgets = {", "return super().__init__(*args, **kwargs) class Meta: model = User fields = ('last_name', 'first_name', 'avatar')", "{ 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ', } def get_initial_for_field(self, field,", "khẩu cũ bạn vừa nhập không đúng.\"), } old_password = forms.CharField( label=_(\"M<PASSWORD>\"), strip=False,", "password = self.cleaned_data.get('password') if username is not None and password is not None:", "Meta: model = User fields = ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(),", "model = User fields = ('following_categories', ) widgets = { 'following_categories': forms.CheckboxSelectMultiple(), }", "Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False,", "'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),", "'password_mismatch': _('Mật khẩu không khớp.'), } re_password = forms.CharField( label='<PASSWORD>', widget=forms.PasswordInput, ) class", "label=_(\"Mật khẩu\"), widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}), strip=False, ) new_password2 = forms.CharField( label=_(\"Nhập lại mật khẩu\"),", "as auth_forms from django.db.models import Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), )", "if commit: user.save() return user class SetPasswordForm(auth_forms.SetPasswordForm): error_messages = { 'password_mismatch': _('Mật khẩu", "PasswordChangeForm(SetPasswordForm): error_messages = { **SetPasswordForm.error_messages, 'password_incorrect': _(\"Mật khẩu cũ bạn vừa nhập không", "def __init__(self, user, *args, **kwargs): super().__init__(*args, **kwargs) self.following_categories = user.following_categories.all() self.fields['following_categories'].queryset = self.following_categories", "*args, **kwargs): super().__init__(user, *args, **kwargs) following_query = Q() for category in self.following_categories: following_query", "password = forms.CharField( label=_(\"<PASSWORD>\"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}), ) error_messages = { 'invalid_login': _(\"Username", "'email', 'password',) labels = { 'email': 'Email', 'password': '<PASSWORD>', 'first_name': 'Tên', 'last_name': 'Họ',", "import Q class LoginForm(forms.Form): username = forms.CharField( label=_('Username'), ) password = forms.CharField( label=_(\"<PASSWORD>\"),", "label='<PASSWORD>', widget=forms.PasswordInput, ) class Meta: model = User fields = ('username', 
# --- Django forms: login, registration, password change, topic following ---
from django import forms
from .models import User
from news.models import Category
from django.utils.translation import gettext, gettext_lazy as _
from django.contrib.auth import authenticate, forms as auth_forms
from django.db.models import Q


class LoginForm(forms.Form):
    username = forms.CharField(
        label=_('Username'),
    )
    password = forms.CharField(
        label=_("Password"),
        strip=False,
        widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}),
    )
    error_messages = {
        'invalid_login': _("Incorrect username or password."),
    }

    def __init__(self, *args, **kwargs):
        self.user_cache = None
        super().__init__(*args, **kwargs)

    def get_initial_for_field(self, field, field_name):
        return ''

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username is not None and password is not None:
            self.user_cache = authenticate(
                username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                )
        return self.cleaned_data

    def get_user(self):
        return self.user_cache


class RegisterForm(forms.ModelForm):
    error_messages = {
        'password_mismatch': _('The passwords do not match.'),
    }
    # Confirmation field referenced by clean() below.
    re_password = forms.CharField(
        label=_('Confirm password'),
        strip=False,
        widget=forms.PasswordInput(),
    )

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password',)
        labels = {
            'email': 'Email',
            'password': 'Password',
            'first_name': 'First name',
            'last_name': 'Last name',
        }

    def get_initial_for_field(self, field, field_name):
        return ''

    def clean(self):
        password = self.cleaned_data.get('password')
        re_password = self.cleaned_data.get('re_password')
        if password and re_password and password != re_password:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch'
            )
        return self.cleaned_data

    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
        return user


class SetPasswordForm(auth_forms.SetPasswordForm):
    error_messages = {
        'password_mismatch': _('The passwords do not match.'),
    }
    new_password1 = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
        strip=False,
    )


class PasswordChangeForm(SetPasswordForm):
    error_messages = {
        **SetPasswordForm.error_messages,
        'password_incorrect': _("The old password you entered is incorrect."),
    }
    old_password = forms.CharField(
        label=_("Old password"),
        strip=False,
        widget=forms.PasswordInput(
            attrs={'autocomplete': 'current-password', 'autofocus': True}),
    )
    field_order = ['old_password', 'new_password1', 'new_password2']

    def get_initial_for_field(self, field, field_name):
        return ''

    def clean_old_password(self):
        old_password = self.cleaned_data.get('old_password')
        if not self.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'],
                code='password_incorrect',
            )
        return old_password

    def clean(self):
        new_password = self.cleaned_data.get('new_password1')
        self.user.set_password(new_password)
        self.user.save()
        return self.cleaned_data


class TopicOrganizeForm(forms.ModelForm):
    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.following_categories = user.following_categories.all()
        self.fields['following_categories'].queryset = self.following_categories

    class Meta:
        model = User
        fields = ('following_categories', )
        widgets = {
            'following_categories': forms.CheckboxSelectMultiple(),
        }


class TopicAddForm(TopicOrganizeForm):
    adding_categories = forms.ModelMultipleChoiceField(queryset=None)

    def __init__(self, user, *args, **kwargs):
        super().__init__(user, *args, **kwargs)
        following_query = Q()
        for category in self.following_categories:
            following_query |= Q(pk=category.pk)
        self.fields['adding_categories'].queryset = \
            Category.objects.exclude(following_query)

    def clean(self):
        return self.cleaned_data

    class Meta(TopicOrganizeForm.Meta):
        fields = ('following_categories', 'adding_categories', )
        widgets = {
            'adding_categories': forms.CheckboxSelectMultiple()
        }


class UserUpdateForm(forms.ModelForm):
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)

    class Meta:
        model = User
        fields = ('last_name', 'first_name', 'avatar')
        labels = {
            'last_name': 'Last name',
            'first_name': 'First name',
        }
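# Usage sketch (hypothetical view, not part of the forms module above): shows
# how LoginForm.get_user() feeds Django's session login. The 'home' URL name
# and template path are assumptions for illustration.
from django.contrib.auth import login
from django.shortcuts import redirect, render


def login_view(request):
    form = LoginForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        login(request, form.get_user())  # user was authenticated in LoginForm.clean()
        return redirect('home')
    return render(request, 'users/login.html', {'form': form})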
[ "sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for a context, and close", "from contextlib import contextmanager from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope", "**kwargs): \"\"\"Scope a db_session for a context, and close it afterwards\"\"\" db_session =", "import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for a context, and", "sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for a context,", "context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)() try: yield db_session finally:", "@contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for a context, and close it", "from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for a", "and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)() try: yield db_session finally: db_session.close()", "make(db_engine, **kwargs): \"\"\"Scope a db_session for a context, and close it afterwards\"\"\" db_session", "db_session for a context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)() try:", "contextmanager from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session for", "a context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)() try: yield db_session", "def make(db_engine, **kwargs): \"\"\"Scope a db_session for a context, and close it afterwards\"\"\"", "<reponame>boomletsgo/falcon-rest from contextlib import contextmanager from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs):", "import contextmanager from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a db_session", "\"\"\"Scope a db_session for a context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine,", "for a context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)() try: yield", "a db_session for a context, and close it afterwards\"\"\" db_session = sessionmaker(bind=db_engine, **kwargs)()", "contextlib import contextmanager from sqlalchemy.orm import sessionmaker @contextmanager def make(db_engine, **kwargs): \"\"\"Scope a" ]
[ "= NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0,", "self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration)", "not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value):", "and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event", "on y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\")", "self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def", "direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance,", "not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move", "x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y =", "acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor", "self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity", "> self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move", "move(self, instance): \"\"\"Move object :param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity()", "def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y +=", "self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\"", "zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x", "self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\"", "Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit", "in y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def", "def on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\"", "not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move = True def on_move_x(self):", "if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move = True def", "(velocity_vector * self.speed_limit / 
velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance: self", "self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move object", "class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity", "self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of", "if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x =", "if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x:", "event on x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y:", "speed_limit: speed limit for object. 10 by default :type speed_limit: float \"\"\" super(Movable,", "_change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y", "ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable", "speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def", "if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to", "0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop", "on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self): \"\"\"On stop y event\"\"\" pass", "def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector", "ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins", "self.in_move = False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self): \"\"\"On", "self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def", "def move(self, instance): \"\"\"Move object :param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\"", "move x event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self):", "= ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs):", "to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\"", "False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self): \"\"\"On stop y", "self.velocity_x: 
self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On", "\"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity)", "velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector", "by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity", "_update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector =", "self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move", "stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def", "= False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self): \"\"\"On stop", ":param speed_limit: speed limit for object. 10 by default :type speed_limit: float \"\"\"", "= self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0", "value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\"", "self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if", "direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0", "self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\" if not", "NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y =", "def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\" if not value and", "NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self,", "if not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On", "self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\" if not value", "move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self):", "kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x", "object :param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def", "\"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if", "or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y =", "velocity: kivy.vector.Vector :param speed_limit: speed limit for object. 
10 by default :type speed_limit:", "* self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance: self analog", "self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects", "and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move", "constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit for", "self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x:", "event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self):", "if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self,", "movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x", "self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move object :param", "NumericProperty, ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object import BaseObject class Movable(BaseObject):", "Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def", "if not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance,", "event on y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x:", "NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration =", "super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y')", "on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\" if not value and self.in_move:", "velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit /", "classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x =", "value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch", "10 by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit", "value): \"\"\"Dispatch event on y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if", "speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"])", "velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0)", "\"\"\"Stop object\"\"\" self.move_stop_x() 
self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\"", "if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\"", "def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self):", "True def on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On move y", "= True def on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On move", "of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() >", "analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\"", "\"\"\"Dispatch event on x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if not", "from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0)", "acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args,", "self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) +", "def on_move(self): \"\"\"On move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move x", "= NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def", "self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity", "\"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x,", "instance): \"\"\"Move object :param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position()", "self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or", "kivy.vector import Vector from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\"", "**kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed", "self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x =", "x event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On", ":type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity", "float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move')", "\"\"\"Move object :param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration()", "= 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def 
on_velocity_x(self, instance, value): \"\"\"Dispatch", "instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change", "object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\" if", "def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\" if not value and", "self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move =", "\"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y()", "instance, value): \"\"\"Dispatch event on y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\")", "velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if", "import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y =", "self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\")", "instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x +=", "self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\" if not value", "move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move x event\"\"\" pass def", "def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop x", "def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on", "BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity", "self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x", "**kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x')", "= Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length())", "\"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x')", "\"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\" pass def", "velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit for object. 
10", "self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self):", "event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On stop", "self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x move\"\"\" if not", "self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop')", "\"\"\"Dispatch event on y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if not", "NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0),", ":param instance: self analog :type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self):", ":type instance: kivy.uix.widget.Widget \"\"\" self._update_velocity() self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x", "instance, value): \"\"\"Dispatch event on x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\")", "parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y", "move y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def", "\"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False", "limit for object. 10 by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs)", "self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance: self analog :type", "_reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop", "self.speed_limit = speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y')", "import ( NumericProperty, ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object import BaseObject", "\"\"\"On move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move x event\"\"\" pass", "def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector", "objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y:", "self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity = Vector(*self.velocity)", "velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit for object. 
10 by", "not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\" if", "on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop x event\"\"\"", "def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\"", "object. 10 by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit =", "because of acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length()", "( NumericProperty, ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object import BaseObject class", "= (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance:", "__init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type", "velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x,", "self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on", "= NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y", "self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y", "def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in", "acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10.,", "self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y", "self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self):", "= NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration", "for object. 
10 by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit", "0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event", "= speed_limit self.velocity = velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move)", "velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector * self.speed_limit / velocity_vector.length()) def move(self, instance):", "self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def", "BooleanProperty) from kivy.vector import Vector from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for", "self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because", "self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration", "move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self,", "value): \"\"\"Dispatch event on x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if", "on_move(self): \"\"\"On move event\"\"\" self.in_move = True def on_move_x(self): \"\"\"On move x event\"\"\"", "speed limit for object. 
10 by default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args,", "= 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self):", "velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance: self analog :type instance: kivy.uix.widget.Widget", "pass def on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\"", "velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity", "= BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity:", "Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector", "event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On", "y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self):", "on x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\")", "Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity =", "def on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move", "move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y", "position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\")", "acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x", "vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit for object. 10 by default", "for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y)", "acceleration\"\"\" self.velocity = Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit:", "+ Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity = (velocity_vector *", "on_move_y(self): \"\"\"On move y event\"\"\" pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move =", "kivy.vector.Vector :param speed_limit: speed limit for object. 
10 by default :type speed_limit: float", "= Vector(*self.velocity) + Vector(*self.acceleration) velocity_vector = Vector(self.velocity) if velocity_vector.length() > self.speed_limit: self.velocity =", "from kivy.properties import ( NumericProperty, ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object", "kivy.properties import ( NumericProperty, ReferenceListProperty, BooleanProperty) from kivy.vector import Vector from parabox.base_object import", "+= self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y", "= velocity self.add_to_collections([\"movable\"]) self.register_event_type('on_move') self.register_event_type('on_move_x') self.register_event_type('on_move_y') self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change", "= 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def move_stop(self):", "acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move = BooleanProperty(False)", "\"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in", "\"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass", "velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move =", "self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def on_move(self): \"\"\"On move event\"\"\" self.in_move = True", "\"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\"", "default :type speed_limit: float \"\"\" super(Movable, self).__init__(*args, **kwargs) self.speed_limit = speed_limit self.velocity =", "speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit:", "/ velocity_vector.length()) def move(self, instance): \"\"\"Move object :param instance: self analog :type instance:", "import Vector from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x", "+= self.velocity_y if self.velocity_y: self.dispatch(\"on_move_y\") if self.velocity_y or self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set", "move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x()", "BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x = NumericProperty(0) velocity_y = NumericProperty(0)", "\"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\") self.y += self.velocity_y if", "self.velocity_x: self.dispatch(\"on_move\") def _reset_acceleration(self): \"\"\"Set acceleration to zero\"\"\" self.acceleration_x = self.acceleration_y = 0", "self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value):", "\"\"\"Movable constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit", "Vector from parabox.base_object 
import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable classes\"\"\" velocity_x =", "*args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type velocity:", "0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop", "on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On move y event\"\"\" pass", "ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y) in_move", "0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param velocity: velocity vector :type velocity: kivy.vector.Vector :param", "velocity_y = NumericProperty(0) velocity = ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0)", "self.register_event_type('on_stop') self.register_event_type('on_stop_x') self.register_event_type('on_stop_y') self.bind(on_update=self.move) def _update_velocity(self): \"\"\"Change velocity because of acceleration\"\"\" self.velocity =", "in x direction\"\"\" self.velocity_x = 0 def move_stop_y(self): \"\"\"Stop in y direction\"\"\" self.velocity_y", "= ReferenceListProperty(velocity_x, velocity_y) acceleration_x = NumericProperty(0) acceleration_y = NumericProperty(0) acceleration = ReferenceListProperty(acceleration_x, acceleration_y)", ":param velocity: velocity vector :type velocity: kivy.vector.Vector :param speed_limit: speed limit for object.", "move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self, instance, value): \"\"\"Dispatch event on x", "self.in_move = True def on_move_x(self): \"\"\"On move x event\"\"\" pass def on_move_y(self): \"\"\"On", "y move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_y\") if not self.velocity_x: self.dispatch(\"on_stop\") def", "self._change_position() self._reset_acceleration() def _change_position(self): \"\"\"Change objects position\"\"\" self.x += self.velocity_x if self.velocity_x: self.dispatch(\"on_move_x\")", "x move\"\"\" if not value and self.in_move: self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def", "on_velocity_y(self, instance, value): \"\"\"Dispatch event on y move\"\"\" if not value and self.in_move:", "def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self): \"\"\"On stop y event\"\"\"", "self.dispatch(\"on_stop_x\") if not self.velocity_y: self.dispatch(\"on_stop\") def on_velocity_y(self, instance, value): \"\"\"Dispatch event on y", "pass def on_stop(self): \"\"\"On stop event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop", ":type velocity: kivy.vector.Vector :param speed_limit: speed limit for object. 10 by default :type", "y direction\"\"\" self.velocity_y = 0 def move_stop(self): \"\"\"Stop object\"\"\" self.move_stop_x() self.move_stop_y() def on_velocity_x(self,", "self.acceleration_y = 0 def move_stop_x(self): \"\"\"Stop in x direction\"\"\" self.velocity_x = 0 def", "in_move = BooleanProperty(False) def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs): \"\"\"Movable constructor :param", "from kivy.vector import Vector from parabox.base_object import BaseObject class Movable(BaseObject): \"\"\"Mixins for movable", "event\"\"\" self.in_move = False def on_stop_x(self): \"\"\"On stop x event\"\"\" pass def on_stop_y(self):" ]
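# Usage sketch (hypothetical): Movable.__init__ binds to an "on_update" event,
# so BaseObject is assumed to register that event and fire it once per frame.
# One dispatched update folds acceleration into velocity, moves the widget,
# and resets acceleration to zero.
class Ball(Movable):
    """Minimal concrete movable object."""


ball = Ball(velocity=(2, 0), speed_limit=5.)
ball.acceleration = (0, -1)   # gravity-like pull for this frame only
ball.dispatch("on_update")    # velocity becomes (2, -1) and the widget moves
print(ball.velocity, ball.pos)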
[ "import lru_cache from pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str =", "mongo_url: str = \"\" testing: bool = False @lru_cache(typed=False) def get_settings() -> Settings:", "str = \"\" mongo_url: str = \"\" testing: bool = False @lru_cache(typed=False) def", "\"\" testing: bool = False @lru_cache(typed=False) def get_settings() -> Settings: \"\"\"Initialize settings.\"\"\" return", "str = \"\" testing: bool = False @lru_cache(typed=False) def get_settings() -> Settings: \"\"\"Initialize", "pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str", "testing: bool = False @lru_cache(typed=False) def get_settings() -> Settings: \"\"\"Initialize settings.\"\"\" return Settings()", "Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str = \"\" testing: bool", "= \"\" testing: bool = False @lru_cache(typed=False) def get_settings() -> Settings: \"\"\"Initialize settings.\"\"\"", "BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str = \"\"", "= \"\" mongo_url: str = \"\" testing: bool = False @lru_cache(typed=False) def get_settings()", "\"\" mongo_url: str = \"\" testing: bool = False @lru_cache(typed=False) def get_settings() ->", "from pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url:", "from functools import lru_cache from pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key:", "\"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str = \"\" testing: bool =", "lru_cache from pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\"", "class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str = \"\" testing:", "secret_key: str = \"\" mongo_url: str = \"\" testing: bool = False @lru_cache(typed=False)", "functools import lru_cache from pydantic import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str", "import BaseSettings class Settings(BaseSettings): \"\"\"Settings model.\"\"\" secret_key: str = \"\" mongo_url: str =", "model.\"\"\" secret_key: str = \"\" mongo_url: str = \"\" testing: bool = False" ]
[ "axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def", "x.dot(Wx) + b) cache = next_h, prev_h, x, Wx, Wh return next_h, cache", "= x.T.dot(dtanh) # (D, H) # Gradients of loss wrt Wh dWh =", "def red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 =", "= grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1, 2], [2,", "0], [0, 1], [1, 1]]) x_grad3 = np.array([[0, 0], [0, 0], [1, 1]])", "H) # Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of", "1]]) def red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x): return", "error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 bar1 = grad(foo1)", "[0, 0]]) x_grad1 = np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 = np.array([[0,", "loss wrt b. Note we broadcast b in practice. Thus result of #", "= np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h, prev_h, x, Wx, Wh", "red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1)", "in practice. Thus result of # matrix ops are just sum over columns", "6]]) x_grad = np.array([[1, 1], [1, 1], [1, 1]]) def red1(x): return mp.sum(x)", "foo1(x): return 1 bar1 = grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def", "+ np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx)", "grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def", "def rel_error(x, y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x - y) /", "x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy()", "* nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b,", "grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5", "# Gradients of loss wrt b. Note we broadcast b in practice. 
Thus", "= 4, 5, 6 x = np.random.randn(N, D) h = np.random.randn(N, H) Wx", "x dx = dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T)", "keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1", "cache # Gradients of loss wrt tanh dtanh = dnext_h * (1 -", "0], [1, 1]]) def red1(x): return mp.min(x) def red2(x): return mp.min(x, axis=1) def", "np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db # preparation N, D,", "def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) +", "prev_h, x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx,", "np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 = np.array([[0, 1], [1, 0], [1,", "= np.array([[0, 1], [1, 0], [0, 0]]) def red1(x): return mp.max(x) def red2(x):", "b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5)))", "prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h", "from rnn_step_forward next_h, prev_h, x, Wx, Wh = cache # Gradients of loss", "minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return", "keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy()", "nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0]", "== 0.0 def test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3, 4], [5,", "np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 = np.array([[1, 0], [0, 1], [1,", "== x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np =", "return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert", "[0, 0], [1, 1]]) x_grad2 = np.array([[1, 0], [0, 1], [1, 1]]) x_grad3", "y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h", "import print_function import minpy.numpy as mp import numpy as np import minpy.dispatch.policy as", "mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0)", "= grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad)", "MinPy start = time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h:", "def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b)", "1], [1, 0], [0, 0]]) def red1(x): return mp.max(x) def red2(x): return mp.max(x,", "next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h, prev_h, x, Wx,", "= np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 1], [1, 0],", "def red1(x): return mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x,", "loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx", "def test_zero_input_grad(): def foo1(x): return 1 bar1 = grad(foo1) assert bar1(0) == 0.0", "grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np = np.array([[1, 2],", "mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2)", "policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as", "start) # test 
NumPy start = time.time() out, cache = rnn_step_forward(x, h, Wx,", "dx, dprev_h, dWx, dWh, db # preparation N, D, H = 4, 5,", "np.random.randn(H, H) b = np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx, Wh, b)", "np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5)", "0], [1, 1]]) x_grad2 = np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 =", "prev_h, x, Wx, Wh = cache # Gradients of loss wrt tanh dtanh", "grad, minpy_to_numpy as mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd():", "[1, 0], [0, 0]]) def red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1)", "test NumPy start = time.time() out, cache = rnn_step_forward(x, h, Wx, Wh, b)", "calculation end = time.time() print(\"NumPy total time elapsed:\", end - start) print() print(\"Result", "# matrix ops are just sum over columns db = dtanh.sum(axis=0) # ==", "h = np.random.randn(N, H) Wx = np.random.randn(D, H) Wh = np.random.randn(H, H) b", "time elapsed:\", end - start) # test NumPy start = time.time() out, cache", "np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) +", "prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache =", "', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4]))", "mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0,", "out *= dnext_h # to agree with MinPy calculation end = time.time() print(\"NumPy", "loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt b. Note", "1]]) def red1(x): return mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x): return", "# to agree with MinPy calculation end = time.time() print(\"NumPy total time elapsed:\",", "x, Wx, Wh = cache # Gradients of loss wrt tanh dtanh =", "error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x):", "+ prev_h.dot(Wh) + b) return next_h def rel_error(x, y): \"\"\" returns relative error", "rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1", "x_np = np.array([[1, 2], [3, 4], [5, 6]]) x_grad = np.array([[1, 1], [1,", "h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy start = time.time()", "x_grad2 = np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 = np.array([[0, 0], [0,", "import minpy.numpy as mp import numpy as np import minpy.dispatch.policy as policy from", "b) return next_h def rel_error(x, y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x", "b) dnext_h = np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss = lambda", "(N, H) # Gradients of loss wrt x dx = dtanh.dot(Wx.T) # Gradients", "assert bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3,", "dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt b. 
Note we broadcast b", "def red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x):", "== x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert", "result of # matrix ops are just sum over columns db = dtanh.sum(axis=0)", "as mp import numpy as np import minpy.dispatch.policy as policy from minpy.core import", "np.array([[0, 1], [1, 0], [0, 0]]) def red1(x): return mp.max(x) def red2(x): return", "Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss", "= np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss = lambda x, h,", "np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4)", "x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy()", "@convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) +", "grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1, 2], [2, 1],", "grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 bar1", "2], [3, 4], [5, 6]]) x_grad = np.array([[1, 1], [1, 1], [1, 1]])", "we broadcast b in practice. Thus result of # matrix ops are just", "convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm import time #", "matrix ops are just sum over columns db = dtanh.sum(axis=0) # == np.ones([N,", "assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max()", "- y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b):", "axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert", "assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min():", "grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() ==", "np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np", "next_h, prev_h, x, Wx, Wh = cache # Gradients of loss wrt tanh", "Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def", "- next_h * next_h) # (N, H) # Gradients of loss wrt x", "= time.time() print(\"NumPy total time elapsed:\", end - start) print() print(\"Result Check:\") print('dx", "np.array([[1, 1], [1, 1], [1, 1]]) def red1(x): return mp.sum(x) def red2(x): return", "H) Wx = np.random.randn(D, H) Wh = np.random.randn(H, H) b = np.random.randn(H) out,", "= np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 = np.array([[0, 1], [1, 0],", "== x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert", "1 bar1 = grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np", "end = time.time() print(\"MinPy total time elapsed:\", end - start) # test NumPy", "[1, 1]]) x_grad2 = np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 = np.array([[0,", "mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def", "cache = rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx, dWh, db =", "dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out 
*= dnext_h # to agree", "None, None # Load values from rnn_step_forward next_h, prev_h, x, Wx, Wh =", "np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h, prev_h, x, Wx, Wh return", "db = rnn_step_backward(dnext_h, cache) out *= dnext_h # to agree with MinPy calculation", "start = time.time() out, cache = rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h,", "return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x,", "return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 =", "np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 0], [0, 0], [1,", "[2, 1], [0, 0]]) x_grad1 = np.array([[0, 1], [1, 0], [0, 0]]) x_grad2", "rnn_step_forward next_h, prev_h, x, Wx, Wh = cache # Gradients of loss wrt", "Wh, b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out *= dnext_h", "__future__ import print_function import minpy.numpy as mp import numpy as np import minpy.dispatch.policy", "return next_h def rel_error(x, y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x -", "', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 bar1 = grad(foo1) assert", "0], [0, 0]]) def red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1) def", "None, None, None # Load values from rnn_step_forward next_h, prev_h, x, Wx, Wh", "assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__ == \"__main__\": test_autograd() test_zero_input_grad()", "mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def rel_error(x, y): \"\"\" returns relative", "mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def", "[0, 0]]) x_grad2 = np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 = np.array([[0,", "return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end = time.time()", "bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3, 4],", "ops are just sum over columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0,", "print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ',", "x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1,", "np.array([[1, 2], [3, 4], [5, 6]]) x_grad = np.array([[1, 1], [1, 1], [1,", "dnext_h = np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss = lambda x,", "loss wrt tanh dtanh = dnext_h * (1 - next_h * next_h) #", "assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max():", "[1, 1]]) x_grad3 = np.array([[0, 1], [1, 0], [0, 0]]) def red1(x): return", "[1, 1]]) def red1(x): return mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x):", "minpy.numpy as mp import numpy as np import minpy.dispatch.policy as policy from minpy.core", "x_grad = np.array([[1, 1], [1, 1], [1, 1]]) def red1(x): return mp.sum(x) def", "[2, 1], [0, 0]]) x_grad1 = np.array([[0, 0], [0, 0], [1, 1]]) x_grad2", "dx = dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) #", "print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def", "red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1)", "# (N, H) # 
Gradients of loss wrt x dx = dtanh.dot(Wx.T) #", "x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy()", "= grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1, 2], [2,", "grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() ==", "rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db", "end - start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error:", "== x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert", "b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def rel_error(x, y):", "as mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def", "over columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h,", "= np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 = np.array([[0, 1], [1, 0],", "= grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2)", "# test MinPy start = time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh,", "# Load values from rnn_step_forward next_h, prev_h, x, Wx, Wh = cache #", "lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) *", "rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db = None, None, None, None, None", "x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1,", "Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db", "rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh", "[1, 0], [1, 1]]) x_grad3 = np.array([[0, 1], [1, 0], [0, 0]]) def", "H) b = np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx, Wh, b) dnext_h", "= mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def rel_error(x, y): \"\"\" returns", "= time.time() print(\"MinPy total time elapsed:\", end - start) # test NumPy start", "grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def", "assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 =", "relative error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def", "x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h)", "red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return", "error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x,", "next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db = None, None,", "dWh, db = None, None, None, None, None # Load values from rnn_step_forward", "time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx,", "= grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__ == \"__main__\":", "grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == 
x_grad3) test_sum()", "nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh,", "time.time() out, cache = rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx, dWh,", "grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error:", "rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache", "next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def rel_error(x, y): \"\"\"", "h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function", "= rnn_step_backward(dnext_h, cache) out *= dnext_h # to agree with MinPy calculation end", "print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 bar1 =", "test_max(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 1],", "range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy", "loss wrt Wx dWx = x.T.dot(dtanh) # (D, H) # Gradients of loss", "time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h", "Note we broadcast b in practice. Thus result of # matrix ops are", "dprev_h, dWx, dWh, db # preparation N, D, H = 4, 5, 6", "np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) +", "H = 4, 5, 6 x = np.random.randn(N, D) h = np.random.randn(N, H)", "= np.random.randn(N, H) Wx = np.random.randn(D, H) Wh = np.random.randn(H, H) b =", "1]]) x_grad3 = np.array([[0, 1], [1, 0], [0, 0]]) def red1(x): return mp.max(x)", "of loss wrt x dx = dtanh.dot(Wx.T) # Gradients of loss wrt prev_h", "0]]) x_grad2 = np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 = np.array([[0, 1],", "return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True)", "end - start) # test NumPy start = time.time() out, cache = rnn_step_forward(x,", "= dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db", "mp import numpy as np import minpy.dispatch.policy as policy from minpy.core import convert_args,", "sum over columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx,", "== x_grad3) def test_min(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1", "next_h def rel_error(x, y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x - y)", "tanh dtanh = dnext_h * (1 - next_h * next_h) # (N, H)", "mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy()", "time.time() print(\"NumPy total time elapsed:\", end - start) print() print(\"Result Check:\") print('dx error:", "mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx)", "= grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total time", "np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__ == \"__main__\": test_autograd() test_zero_input_grad() test_reduction()", "== np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db # preparation N,", "= grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3)", "np.random.randn(H) out, cache = rnn_step_forward(x, h, 
Wx, Wh, b) dnext_h = np.random.randn(*out.shape) #", "Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h,", "grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4", "4, 5, 6 x = np.random.randn(N, D) h = np.random.randn(N, H) Wx =", "1], [1, 1], [1, 1]]) def red1(x): return mp.sum(x) def red2(x): return mp.sum(x,", "= np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 = np.array([[1, 0], [0, 1],", "', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3]))", "\"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h,", "Wh = cache # Gradients of loss wrt tanh dtanh = dnext_h *", "x_grad2 = np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 = np.array([[0, 1], [1,", "0]]) def red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x): return", "red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x): return", "grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def", "import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b):", "rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)", "= rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h,", "red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2", "import minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as", "grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad():", "grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1, 2],", "[0, 1], [1, 1]]) x_grad3 = np.array([[0, 0], [0, 0], [1, 1]]) def", "+ b) return next_h def rel_error(x, y): \"\"\" returns relative error \"\"\" return", "grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1, 2],", "def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad)", "axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def", "values from rnn_step_forward next_h, prev_h, x, Wx, Wh = cache # Gradients of", "dWh, db = rnn_step_backward(dnext_h, cache) out *= dnext_h # to agree with MinPy", "grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() ==", "print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ',", "Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h, prev_h,", "test_zero_input_grad(): def foo1(x): return 1 bar1 = grad(foo1) assert bar1(0) == 0.0 def", "Wx = np.random.randn(D, H) Wh = np.random.randn(H, H) b = np.random.randn(H) out, cache", "0], [1, 1]]) x_grad3 = np.array([[0, 1], [1, 0], [0, 0]]) def red1(x):", "mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0)", "# Gradients of loss 
wrt Wx dWx = x.T.dot(dtanh) # (D, H) #", "grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3", "+ x.dot(Wx) + b) cache = next_h, prev_h, x, Wx, Wh return next_h,", "[1, 1]]) x_grad3 = np.array([[0, 0], [0, 0], [1, 1]]) def red1(x): return", "mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0,", "Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b) return next_h def rel_error(x,", "Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss,", "grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 bar1 = grad(foo1) assert bar1(0) ==", "dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx = x.T.dot(dtanh) # (D, H)", "(np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh)", "np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3)", "out, cache = rnn_step_forward(x, h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test", "[3, 4], [5, 6]]) x_grad = np.array([[1, 1], [1, 1], [1, 1]]) def", "practice. Thus result of # matrix ops are just sum over columns db", "# Gradients of loss wrt tanh dtanh = dnext_h * (1 - next_h", "Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\", end - start)", "np.random.randn(D, H) Wh = np.random.randn(H, H) b = np.random.randn(H) out, cache = rnn_step_forward(x,", "np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min()", "', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2]))", "1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db # preparation N, D, H", "x.T.dot(dtanh) # (D, H) # Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh)", "== x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np =", "0], [0, 0], [1, 1]]) def red1(x): return mp.min(x) def red2(x): return mp.min(x,", "= return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end =", "= np.array([[0, 0], [0, 0], [1, 1]]) def red1(x): return mp.min(x) def red2(x):", "are just sum over columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :]", "Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss =", "= grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2)", "= lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b)", "agree with MinPy calculation end = time.time() print(\"NumPy total time elapsed:\", end -", "5, 6 x = np.random.randn(N, D) h = np.random.randn(N, H) Wx = np.random.randn(D,", "keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1", "grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total", "axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True)", "wrt b. Note we broadcast b in practice. 
Thus result of # matrix", "Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx,", "error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx,", "0], [0, 0]]) x_grad2 = np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 =", "h, Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x,", "# mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h =", "b. Note we broadcast b in practice. Thus result of # matrix ops", "dWx, dWh, db = rnn_step_backward(dnext_h, cache) out *= dnext_h # to agree with", "grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3", "red1(x): return mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1,", "Thus result of # matrix ops are just sum over columns db =", "of loss wrt b. Note we broadcast b in practice. Thus result of", "[1, 0], [0, 0]]) x_grad2 = np.array([[0, 1], [1, 0], [1, 1]]) x_grad3", "x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy()", "*= dnext_h # to agree with MinPy calculation end = time.time() print(\"NumPy total", "def test_max(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0,", "b = np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx, Wh, b) dnext_h =", "# Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss", "h, Wx, Wh, b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out", "= time.time() out, cache = rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx,", "def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1)", "red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x): return", "Load values from rnn_step_forward next_h, prev_h, x, Wx, Wh = cache # Gradients", "minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm import", "np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 = np.array([[0, 0], [0, 0], [1,", "print(\"MinPy total time elapsed:\", end - start) # test NumPy start = time.time()", "= np.random.randn(D, H) Wh = np.random.randn(H, H) b = np.random.randn(H) out, cache =", "np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1, 2], [2, 1], [0, 0]])", "= prev_h.T.dot(dtanh) # Gradients of loss wrt b. Note we broadcast b in", "prev_h.T.dot(dtanh) # Gradients of loss wrt b. 
Note we broadcast b in practice.", "of # matrix ops are just sum over columns db = dtanh.sum(axis=0) #", "= rnn_step_forward(x, h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy start", "NumPy start = time.time() out, cache = rnn_step_forward(x, h, Wx, Wh, b) dx,", "dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out *= dnext_h # to", "None, None, None, None, None # Load values from rnn_step_forward next_h, prev_h, x,", "= np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape)", "rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh,", "x_grad1 = np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 = np.array([[0, 1], [1,", "grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1, 2], [2, 1],", "total time elapsed:\", end - start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx,", "return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x,", "axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() ==", "def red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 =", "mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy()", "None, None, None, None # Load values from rnn_step_forward next_h, prev_h, x, Wx,", "error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db,", "# (D, H) # Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh) #", "total time elapsed:\", end - start) # test NumPy start = time.time() out,", "print_function import minpy.numpy as mp import numpy as np import minpy.dispatch.policy as policy", "return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db = None,", "print(\"NumPy total time elapsed:\", end - start) print() print(\"Result Check:\") print('dx error: ',", "preparation N, D, H = 4, 5, 6 x = np.random.randn(N, D) h", "N, D, H = 4, 5, 6 x = np.random.randn(N, D) h =", "= np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 0], [0, 0],", "cache = next_h, prev_h, x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache):", "return dx, dprev_h, dWx, dWh, db # preparation N, D, H = 4,", "= np.random.randn(H, H) b = np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx, Wh,", "1], [1, 1]]) def red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def", "= cache # Gradients of loss wrt tanh dtanh = dnext_h * (1", "def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db = None, None, None, None,", "[0, 0]]) def red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x):", "assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3) grad5 =", "grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() ==", "dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\", end - start) # test", "= time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h,", "prev_h.dot(Wh) + b) return next_h def rel_error(x, y): \"\"\" returns relative error \"\"\"", "x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__", "print() print(\"Result 
Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1]))", "= np.random.randn(N, D) h = np.random.randn(N, H) Wx = np.random.randn(D, H) Wh =", "= grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np = np.array([[1,", "prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx = x.T.dot(dtanh)", "minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn,", "return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy())", "(1 - next_h * next_h) # (N, H) # Gradients of loss wrt", "axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() ==", "mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x,", "rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def", "grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() ==", "red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2", "db # preparation N, D, H = 4, 5, 6 x = np.random.randn(N,", "return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert", "np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh,", "b in practice. Thus result of # matrix ops are just sum over", ":] return dx, dprev_h, dWx, dWh, db # preparation N, D, H =", "MinPy calculation end = time.time() print(\"NumPy total time elapsed:\", end - start) print()", "[5, 6]]) x_grad = np.array([[1, 1], [1, 1], [1, 1]]) def red1(x): return", "with MinPy calculation end = time.time() print(\"NumPy total time elapsed:\", end - start)", "= next_h, prev_h, x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx,", "next_h, prev_h, x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h,", "rnn_step_backward(dnext_h, cache) out *= dnext_h # to agree with MinPy calculation end =", "dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays", "[1, 1]]) def red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x):", "0]]) x_grad1 = np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 = np.array([[1, 0],", "x_grad) def test_max(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 =", "mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2)", "test_min(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 0],", "axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() ==", "of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt Wx", "np.random.randn(N, H) Wx = np.random.randn(D, H) Wh = np.random.randn(H, H) b = np.random.randn(H)", "test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3, 4], [5, 6]]) x_grad =", "D, H = 4, 5, 6 x = np.random.randn(N, D) h = np.random.randn(N,", "x_grad3 = np.array([[0, 1], [1, 0], [0, 0]]) def red1(x): return mp.max(x) def", "[0, 0], [1, 1]]) def red1(x): return mp.min(x) def red2(x): 
return mp.min(x, axis=1)", "wrt x dx = dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h =", "wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt b. Note we", "grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\",", "Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db =", "\"\"\" returns relative error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) +", "axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert", "x = np.random.randn(N, D) h = np.random.randn(N, H) Wx = np.random.randn(D, H) Wh", "0]]) x_grad1 = np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 = np.array([[0, 1],", "start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h,", "of loss wrt tanh dtanh = dnext_h * (1 - next_h * next_h)", "bar1 = grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def test_sum(): x_np =", "dtanh = dnext_h * (1 - next_h * next_h) # (N, H) #", "4], [5, 6]]) x_grad = np.array([[1, 1], [1, 1], [1, 1]]) def red1(x):", "def red3(x): return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x):", "red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return", "cache = rnn_step_forward(x, h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy", "cache): dx, dprev_h, dWx, dWh, db = None, None, None, None, None #", "print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh, grad_arrays[3])) print('db error: ',", "1], [1, 0], [1, 1]]) x_grad3 = np.array([[0, 1], [1, 0], [0, 0]])", "assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1, 2], [2, 1], [0,", "return 1 bar1 = grad(foo1) assert bar1(0) == 0.0 def test_reduction(): def test_sum():", "to agree with MinPy calculation end = time.time() print(\"NumPy total time elapsed:\", end", "test_sum(): x_np = np.array([[1, 2], [3, 4], [5, 6]]) x_grad = np.array([[1, 1],", "x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 0], [0,", "y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x)", "Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt", "b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h, prev_h, x,", "return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 =", "def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)", "# preparation N, D, H = 4, 5, 6 x = np.random.randn(N, D)", "import numpy as np import minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy,", "out, cache = rnn_step_forward(x, h, Wx, Wh, b) dx, dprev_h, dWx, dWh, db", "end = time.time() print(\"NumPy total time elapsed:\", end - start) print() print(\"Result Check:\")", "H) # Gradients of loss wrt x dx = dtanh.dot(Wx.T) # Gradients of", "print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx", "x_grad3) def test_min(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 =", "minpy_to_numpy as mn, numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args", "rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return 1 
bar1 = grad(foo1) assert bar1(0)", "dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of", "return mp.max(x, axis=1, keepdims=True) def red4(x): return mp.max(x, axis=0) def red5(x): return mp.max(x,", "np.array([[0, 1], [1, 0], [1, 1]]) x_grad3 = np.array([[0, 1], [1, 0], [0,", "cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh, db = None, None, None,", "test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b): next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh)", "(D, H) # Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients", "np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3)", "1]]) x_grad3 = np.array([[0, 0], [0, 0], [1, 1]]) def red1(x): return mp.min(x)", "dnext_h * (1 - next_h * next_h) # (N, H) # Gradients of", "assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 =", "x_grad3 = np.array([[0, 0], [0, 0], [1, 1]]) def red1(x): return mp.min(x) def", "db = None, None, None, None, None # Load values from rnn_step_forward next_h,", "grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0] end", "wrt Wx dWx = x.T.dot(dtanh) # (D, H) # Gradients of loss wrt", "grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() ==", "dWh, db # preparation N, D, H = 4, 5, 6 x =", "', rel_error(dWh, grad_arrays[3])) print('db error: ', rel_error(db, grad_arrays[4])) def test_zero_input_grad(): def foo1(x): return", "b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays = grad_loss_function(x, h, Wx, Wh,", "start = time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x,", "x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h, cache): dx, dprev_h, dWx, dWh,", "np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 1], [1, 0], [0,", "as np import minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad,", "mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy()", "wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx =", "= dnext_h * (1 - next_h * next_h) # (N, H) # Gradients", "= None, None, None, None, None # Load values from rnn_step_forward next_h, prev_h,", "numpy_to_minpy as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h,", "dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db #", "Wx, Wh = cache # Gradients of loss wrt tanh dtanh = dnext_h", "axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True)", "Gradients of loss wrt b. Note we broadcast b in practice. 
Thus result", "None # Load values from rnn_step_forward next_h, prev_h, x, Wx, Wh = cache", "dWx = x.T.dot(dtanh) # (D, H) # Gradients of loss wrt Wh dWh", "def test_min(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0,", "2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 0], [0, 0], [1, 1]])", "= dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx = x.T.dot(dtanh) # (D,", "* (1 - next_h * next_h) # (N, H) # Gradients of loss", "Gradients of loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt", "0], [0, 0], [1, 1]]) x_grad2 = np.array([[1, 0], [0, 1], [1, 1]])", "minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5))) grad_arrays =", "grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__ ==", "assert np.all(grad3(x_np).asnumpy() == x_grad) def test_max(): x_np = np.array([[1, 2], [2, 1], [0,", "[0, 0]]) x_grad1 = np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 = np.array([[1,", "dx, dprev_h, dWx, dWh, db = None, None, None, None, None # Load", "dWx, dWh, db = None, None, None, None, None # Load values from", "Gradients of loss wrt tanh dtanh = dnext_h * (1 - next_h *", "numpy as np import minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy, grad_and_loss,", "# Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients of loss", "return mp.min(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 =", "as nm import time # mp.set_policy(policy.OnlyNumPyPolicy()) def test_autograd(): @convert_args def minpy_rnn_step_forward(x, prev_h, Wx,", "b, dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\", end - start) #", "return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x,", "Gradients of loss wrt Wx dWx = x.T.dot(dtanh) # (D, H) # Gradients", "wrt tanh dtanh = dnext_h * (1 - next_h * next_h) # (N,", "def foo1(x): return 1 bar1 = grad(foo1) assert bar1(0) == 0.0 def test_reduction():", "red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1,", "cache) out *= dnext_h # to agree with MinPy calculation end = time.time()", "np.array([[0, 0], [0, 0], [1, 1]]) def red1(x): return mp.min(x) def red2(x): return", "Wx dWx = x.T.dot(dtanh) # (D, H) # Gradients of loss wrt Wh", "= grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad2) grad4 = grad(red4) assert np.all(grad4(x_np).asnumpy() == x_grad3)", "def red1(x): return mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x,", "def red4(x): return mp.min(x, axis=0) def red5(x): return mp.min(x, axis=0, keepdims=True) grad1 =", "x_grad1 = np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 = np.array([[1, 0], [0,", "b) cache = next_h, prev_h, x, Wx, Wh return next_h, cache def rnn_step_backward(dnext_h,", "return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx,", "red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0,", "db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh,", "elapsed:\", end - start) # test NumPy start = time.time() out, cache =", "1], [1, 0], [0, 0]]) x_grad2 = np.array([[0, 1], [1, 0], [1, 1]])", "np.all(grad4(x_np).asnumpy() == x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np", "Wh = 
np.random.randn(H, H) b = np.random.randn(H) out, cache = rnn_step_forward(x, h, Wx,", "= np.array([[1, 1], [1, 1], [1, 1]]) def red1(x): return mp.sum(x) def red2(x):", "1]]) x_grad2 = np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 = np.array([[0, 0],", "rnn_step_forward(x, h, Wx, Wh, b) dnext_h = np.random.randn(*out.shape) # test MinPy start =", "dWx, dWh, db # preparation N, D, H = 4, 5, 6 x", "mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1", "def test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3, 4], [5, 6]]) x_grad", "dnext_h # to agree with MinPy calculation end = time.time() print(\"NumPy total time", "Gradients of loss wrt x dx = dtanh.dot(Wx.T) # Gradients of loss wrt", "time elapsed:\", end - start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0]))", "H) Wh = np.random.randn(H, H) b = np.random.randn(H) out, cache = rnn_step_forward(x, h,", "1], [0, 0]]) x_grad1 = np.array([[0, 1], [1, 0], [0, 0]]) x_grad2 =", "loss wrt x dx = dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h", "test MinPy start = time.time() rnn_step_forward_loss = lambda x, h, Wx, Wh, b,", "/ (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def rnn_step_forward(x, prev_h, Wx, Wh, b): next_h =", "np import minpy.dispatch.policy as policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy", "Wx, Wh, b): next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b) cache = next_h,", "== x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad2) grad3 = grad(red3) assert", "rel_error(x, y): \"\"\" returns relative error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8,", "# test NumPy start = time.time() out, cache = rnn_step_forward(x, h, Wx, Wh,", "return mp.max(x) def red2(x): return mp.max(x, axis=1) def red3(x): return mp.max(x, axis=1, keepdims=True)", "red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2", "mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2)", "columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx,", "elapsed:\", end - start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h", "Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error:", "= dtanh.dot(Wx.T) # Gradients of loss wrt prev_h dprev_h = dtanh.dot(Wh.T) # Gradients", "* next_h) # (N, H) # Gradients of loss wrt x dx =", "0.0 def test_reduction(): def test_sum(): x_np = np.array([[1, 2], [3, 4], [5, 6]])", "- start) # test NumPy start = time.time() out, cache = rnn_step_forward(x, h,", "broadcast b in practice. Thus result of # matrix ops are just sum", "def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1)", "time.time() print(\"MinPy total time elapsed:\", end - start) # test NumPy start =", "red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x, axis=0, keepdims=True) grad1 = grad(red1)", "Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h) grad_loss_function =", "np.all(grad5(x_np).asnumpy() == x_grad3) def test_min(): x_np = np.array([[1, 2], [2, 1], [0, 0]])", "Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt b. 
Note we broadcast", "= np.array([[1, 2], [3, 4], [5, 6]]) x_grad = np.array([[1, 1], [1, 1],", "dprev_h = dtanh.dot(Wh.T) # Gradients of loss wrt Wx dWx = x.T.dot(dtanh) #", "grad_arrays[0])) print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error:", "= np.array([[1, 0], [0, 1], [1, 1]]) x_grad3 = np.array([[0, 0], [0, 0],", "of loss wrt Wh dWh = prev_h.T.dot(dtanh) # Gradients of loss wrt b.", "def red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x):", "+ b) cache = next_h, prev_h, x, Wx, Wh return next_h, cache def", "= grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 = grad(red3) assert np.all(grad3(x_np).asnumpy() == x_grad)", "# Gradients of loss wrt x dx = dtanh.dot(Wx.T) # Gradients of loss", "grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if __name__ == \"__main__\": test_autograd()", "next_h) # (N, H) # Gradients of loss wrt x dx = dtanh.dot(Wx.T)", "from __future__ import print_function import minpy.numpy as mp import numpy as np import", "return mp.max(x, axis=0) def red5(x): return mp.max(x, axis=0, keepdims=True) grad1 = grad(red1) assert", "Wx, Wh, b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out *=", "of loss wrt Wx dWx = x.T.dot(dtanh) # (D, H) # Gradients of", "x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 1], [1,", "1], [0, 0]]) x_grad1 = np.array([[0, 0], [0, 0], [1, 1]]) x_grad2 =", "Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\", end -", "def red3(x): return mp.min(x, axis=1, keepdims=True) def red4(x): return mp.min(x, axis=0) def red5(x):", "2], [2, 1], [0, 0]]) x_grad1 = np.array([[0, 1], [1, 0], [0, 0]])", "from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm", "# == np.ones([N, 1]).T.dot(dtanh)[0, :] return dx, dprev_h, dWx, dWh, db # preparation", "b) dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache) out *= dnext_h #", "6 x = np.random.randn(N, D) h = np.random.randn(N, H) Wx = np.random.randn(D, H)", "- start) print() print(\"Result Check:\") print('dx error: ', rel_error(dx, grad_arrays[0])) print('dprev_h error: ',", "[1, 1], [1, 1]]) def red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0)", "just sum over columns db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :] return", "== x_grad) def test_max(): x_np = np.array([[1, 2], [2, 1], [0, 0]]) x_grad1", "== x_grad3) grad5 = grad(red5) assert np.all(grad5(x_np).asnumpy() == x_grad3) test_sum() test_max() test_min() if", "def red1(x): return mp.sum(x) def red2(x): return mp.sum(x, axis=0) def red3(x): return mp.sum(x,", "as policy from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy", "keepdims=True) grad1 = grad(red1) assert np.all(grad1(x_np).asnumpy() == x_grad1) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy()", "dprev_h, dWx, dWh, db = None, None, None, None, None # Load values", "returns relative error \"\"\" return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "assert np.all(grad1(x_np).asnumpy() == x_grad) grad2 = grad(red2) assert np.all(grad2(x_np).asnumpy() == x_grad) grad3 =", "h, Wx, Wh, b, dnext_h)[0] end = time.time() print(\"MinPy total time elapsed:\", end", "return mp.min(x) def red2(x): return mp.min(x, axis=1) def red3(x): return mp.min(x, axis=1, keepdims=True)", 
"next_h * next_h) # (N, H) # Gradients of loss wrt x dx", "error: ', rel_error(dprev_h, grad_arrays[1])) print('dWx error: ', rel_error(dWx, grad_arrays[2])) print('dWh error: ', rel_error(dWh,", "np.random.randn(N, D) h = np.random.randn(N, H) Wx = np.random.randn(D, H) Wh = np.random.randn(H,", "import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm import time", "1], [1, 1]]) x_grad3 = np.array([[0, 0], [0, 0], [1, 1]]) def red1(x):", "D) h = np.random.randn(N, H) Wx = np.random.randn(D, H) Wh = np.random.randn(H, H)", "def test_sum(): x_np = np.array([[1, 2], [3, 4], [5, 6]]) x_grad = np.array([[1,", "np.random.randn(*out.shape) # test MinPy start = time.time() rnn_step_forward_loss = lambda x, h, Wx," ]
[ "__author__ = 'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 =", "= list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x %", "point import Point __author__ = 'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5,", "Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x % 2 == 0,", "'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el:", "range(-5, 6))) l2 = list(filter(lambda el: el.x % 2 == 0, l1)) print(l1)", "i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x % 2 == 0, l1))", "from point import Point __author__ = 'pzqa' l1 = list(map(lambda i: Point(i, i*i),", "Point __author__ = 'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5, 6))) l2", "= 'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda", "6))) l2 = list(filter(lambda el: el.x % 2 == 0, l1)) print(l1) print(l2)", "import Point __author__ = 'pzqa' l1 = list(map(lambda i: Point(i, i*i), range(-5, 6)))", "l1 = list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x", "i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x % 2 ==", "list(map(lambda i: Point(i, i*i), range(-5, 6))) l2 = list(filter(lambda el: el.x % 2" ]
[ "type(j) == list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________", "l_r=[] def flatten(l): for j in l: if type(j) == list: flatten(j) else:", "if type(j) == list: j.reverse() l.reverse() return l x=[[1, 2],[3, 4],[5, 6, 7]]", "type(j) == list: j.reverse() l.reverse() return l x=[[1, 2],[3, 4],[5, 6, 7]] print(rev(x))", "for j in l: if type(j) == list: flatten(j) else: l_r.append(j) return l_r", "flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for", "2_________________________________________________________ def rev(l): for j in l: if type(j) == list: j.reverse() l.reverse()", "j in l: if type(j) == list: flatten(j) else: l_r.append(j) return l_r x=", "j in l: if type(j) == list: j.reverse() l.reverse() return l x=[[1, 2],[3,", "l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in", "for j in l: if type(j) == list: j.reverse() l.reverse() return l x=[[1,", "else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j", "#SORU 1_______________________________________________________ l_r=[] def flatten(l): for j in l: if type(j) == list:", "list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l):", "[[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in l: if type(j) ==", "#SORU 2_________________________________________________________ def rev(l): for j in l: if type(j) == list: j.reverse()", "l: if type(j) == list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x))", "x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in l: if type(j)", "in l: if type(j) == list: j.reverse() l.reverse() return l x=[[1, 2],[3, 4],[5,", "l: if type(j) == list: j.reverse() l.reverse() return l x=[[1, 2],[3, 4],[5, 6,", "def flatten(l): for j in l: if type(j) == list: flatten(j) else: l_r.append(j)", "rev(l): for j in l: if type(j) == list: j.reverse() l.reverse() return l", "l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in l: if", "return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in l:", "flatten(l): for j in l: if type(j) == list: flatten(j) else: l_r.append(j) return", "in l: if type(j) == list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5]", "== list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU 2_________________________________________________________ def", "def rev(l): for j in l: if type(j) == list: j.reverse() l.reverse() return", "1_______________________________________________________ l_r=[] def flatten(l): for j 
in l: if type(j) == list: flatten(j)", "print(flatten(x)) #SORU 2_________________________________________________________ def rev(l): for j in l: if type(j) == list:", "if type(j) == list: flatten(j) else: l_r.append(j) return l_r x= [[1,\"a\",[\"cat\"],2],[[[3]],\"dog\"],4,5] print(flatten(x)) #SORU" ]
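# flatten() above accumulates into the module-level l_r, so a second call in
# the same process would keep appending to the first call's result. A
# self-contained alternative without shared state (an editorial sketch, not
# part of the original exercise):

def flatten_gen(l):
    for j in l:
        if isinstance(j, list):
            # Recurse into sublists, yielding their leaves in order.
            yield from flatten_gen(j)
        else:
            yield j

print(list(flatten_gen([[1, "a", ["cat"], 2], [[[3]], "dog"], 4, 5])))
# -> [1, 'a', 'cat', 2, 3, 'dog', 4, 5]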
[ "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')),", "serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254,", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[", "Generated by Django 3.0.4 on 2020-08-31 17:24 from django.conf import settings from django.db", "operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='',", "models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)),", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)),", "] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea',", "= [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300,", "name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username',", "models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE,", "Django 3.0.4 on 2020-08-31 17:24 from django.conf import settings from django.db import migrations,", "class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "('ads_deleted', 
models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved',", "('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "on 2020-08-31 17:24 from django.conf import settings from django.db import migrations, models import", "3.0.4 on 2020-08-31 17:24 from django.conf import settings from django.db import migrations, models", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "17:24 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "<reponame>UsamaKashif/studentutor<gh_stars>1-10 # Generated by Django 3.0.4 on 2020-08-31 17:24 from django.conf import settings", "models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)),", "('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name',", "models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy',", "max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', 
models.CharField(max_length=100)), ('total_ads',", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)),", "models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy',", "models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)),", "('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected',", "('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted',", "('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted',", "('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image',", "models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)),", "[ migrations.CreateModel( name='Academy', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)),", "models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)),", "models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent',", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)),", "models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg',", "('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "# Generated by Django 3.0.4 on 2020-08-31 17:24 from django.conf import settings from", "primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email',", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete',", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', 
models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)),", "models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True,", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Academy', fields=[ ('id',", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "migrations.CreateModel( name='Academy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username',", "max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)),", "2020-08-31 17:24 from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)),", "by Django 3.0.4 on 2020-08-31 17:24 from django.conf import settings from django.db import", "('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)),", "('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254, unique=True)), ('city',", "('email', models.EmailField(max_length=254, unique=True)), ('city', models.CharField(max_length=100)), ('total_ads', models.IntegerField(default=0)), ('ads_deleted', models.IntegerField(default=0)), ('phone', models.CharField(max_length=11)), ('profile_complete', models.BooleanField(default=False)),", "('profile_complete', 
models.BooleanField(default=False)), ('invitations_sent', models.IntegerField(default=0)), ('invitations_sent_accepted', models.IntegerField(default=0)), ('invitations_sent_rejected', models.IntegerField(default=0)), ('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected',", "('invitations_recieved', models.IntegerField(default=0)), ('invitations_recieved_accepted', models.IntegerField(default=0)), ('invitations_recieved_rejected', models.IntegerField(default=0)), ('ad_post_count', models.IntegerField(default=0)), ('user_image', models.ImageField(default='user_profile_default.jpg', upload_to='profile_pics_stds')), ('academy', models.OneToOneField(null=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('textArea', models.CharField(default='', max_length=300, null=True)), ('username', models.CharField(default='username', max_length=100)), ('name', models.CharField(max_length=200))," ]
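# The migration above creates an "Academy" profile table with a one-to-one link to
# the auth user (the linking field is itself named "academy"; the misspelled
# "invitations_recieved" column names are kept as-is, since renaming a database
# field belongs in a follow-up migration, not an edit). A minimal usage sketch
# follows; it assumes the migrated model class is importable from a hypothetical
# app module "academies.models" (the real app name is not shown in the migration)
# and that Django settings are already configured.

from academies.models import Academy  # hypothetical path; the app name isn't in the migration


def create_academy_profile(user, name, email, city, phone):
    # The OneToOneField means a second profile for the same user raises
    # IntegrityError, as does a duplicate email (unique=True).
    return Academy.objects.create(
        academy=user,             # the linked auth user
        username=user.username,   # mirrors the CharField default of 'username'
        name=name,
        email=email,
        city=city,
        phone=phone,
    )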
[ "and ratings) - adding tracks to a group with the same tracks already", "delete: deleting a group doesn't reset the group filter + sort: sorting grouptracks", "tracks: pressing all tracks again will return to previous group - delete: fixed", "tracks already present didn't work (it was impossible to make duplicities except through", "ms and convert them to and from string as needed (done?) - implement", "now focuses the filter editline - in groups menu: groups are now listed", "it with groups and therefore it is possible to query and intersection of", "numpy (diff) Features - filter tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating", "is now available + filter: new advanced filter option !g: allow query groups", "statistics about the tracks (counts and ratings) - adding tracks to a group", "the same group leads to duplicate grouptracks that share the same underlying track;", "it + filter: a history of last 20 filter rules is now available", "sorting - resorting sorted (remember previous sort and apply it after the new", "this will read the config VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv)", "it + relink: change the grouptrack to link to some other similar track", "import window shouldnt continue after enter is pressed (a problem with the groupname)", "- filter tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating - groups, group", "or marked as new - column sizes for importwindow - import cue (gruop)", "- import window - disable enter and esc to close the window -", "model.config # this will read the config VERSION = 0.1.4 def main(): app", "(grouptrack) and replace the current link in the group with it + relink:", "import os from PyQt5 import QtWidgets from ui import mainWindow import model.config #", "be nice to have exact data type before moving on, datetime.time looks promising", "editline - in groups menu: groups are now listed alphabetically - id3v1: id3v1", "import: select best ('B' key) + import: quick resolve (select best track for", "enter is pressed (a problem with the groupname) - ok button must be", "now displays best match indicators + track rating in tooltip is now displayed", "and tracks dont disappear v0.1.2: - import: fixed importing mp3 without track number", "#!/usr/bin/env python ''' Created on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements -", "tooltip is now displayed as a set of stars, plus and dashes +", "properly tanks to wordWrap=False - both tables: removed grid + filter: now doesn't", "change groupname of import or provide an option to do so - striping", "now available + filter: new advanced filter option !g: allow query groups (by", "sort and apply it after the new sort) - context menu on tracks", "add group (manually), rename group, remove group - merge tracks (and retain links", "group leads to duplicate grouptracks that share the same underlying track; in short,", "(a problem with the groupname) - ok button must be disabled if there", "v0.1.3: + all tracks: pressing all tracks again will return to previous group", "merge tracks (and retain links to groups) - NOTE: merging tracks in the", "addtogroup: now supported for grouptracks (group mode) too + import: tracks column now", "menus and shortcuts - (any replacement to cfg) v0.1.1: + import: nameCutToSearchKey -", "- star for new (star vs circle?, there must be visible difference with", "and convert them to and from string as needed (done?) 
- implement for", "line, now it provides some basic statistics about the tracks (counts and ratings)", "tracks again will return to previous group - delete: fixed the dialog message", "names like \"original mix\" - support flac, m4a (basic) - tidy up the", "pages - import window - disable enter and esc to close the window", "import delete row(s) - import search key -> artist & name (and vice", "some other similar track v0.1.4: - migration to PyQt5 FIXME: - import: import", "now possible to directly select multiple cuesheets + import: select best ('B' key)", "file/directory selection crashes the application - import: saying no to name cleanup, cleans", "saying no to name cleanup, cleans up the name anyway TODO: - (artist", "same group leads to duplicate grouptracks that share the same underlying track; in", "other similar track v0.1.4: - migration to PyQt5 FIXME: - import: import file/directory", "add new tracks to group - delete tracks from group (not just grouptracks)", "work by parts, artist and name are evaluated separately - both tables: fixed", "Created on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements - Python 3 -", "of deletion, filter by it) ''' import sys import os from PyQt5 import", "their contents (shortcut \") + import: found tracks now marks verbatim matches with", "indicators + track rating in tooltip is now displayed as a set of", "cue (gruop) - time units - cuesheet has frames, everywhere else are seconds", "so - striping certain parts from names like \"original mix\" - support flac,", "(gruop) - time units - cuesheet has frames, everywhere else are seconds which", "sizes for importwindow - import cue (gruop) - time units - cuesheet has", "merging tracks in the same group leads to duplicate grouptracks that share the", "for our purposes, store ms and convert them to and from string as", "for rating (it should support tracks in groups too) - star for new", "import: found tracks now marks verbatim matches with a star **, ++ if", "-> artist & name (and vice versa) - import real searchkey - import", "row colors + import: it is now possible to directly select multiple cuesheets", "to cfg) v0.1.1: + import: nameCutToSearchKey - make new search key from track", "''' Created on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements - Python 3", "3 - PyQt4 - SQLAlchemy - SQLite - numpy (diff) Features - filter", "group (grouptrack) and replace the current link in the group with it +", "import too - import directory (group) - stars for rating (it should support", "colors + import: it is now possible to directly select multiple cuesheets +", "button must be disabled if there are any tracks that arent selected or", "new - import window shouldnt continue after enter is pressed (a problem with", "moving on, datetime.time looks promising (immutable though) - datetime.time sucks for our purposes,", "search key from track name, but remove any parenthesis and their contents (shortcut", "fixed importing mp3 without track number + addtogroup: now supported for grouptracks (group", "a set of stars, plus and dashes + track and import tables now", "id3v1: id3v1 reader fixed + merge: now allowed in groupmode (maybe it's confusing)", "status line, now it provides some basic statistics about the tracks (counts and", "- import real searchkey - import mm - import should have some status", "(counts and ratings) - adding tracks to a group with the same tracks", "deleting a track (not a group track) - group filter: enabling group filter", "and esc to close the window - automatically change groupname of 
import or", "- stars for rating (it should support tracks in groups too) - star", "import: it is now possible to directly select multiple cuesheets + import: select", "duplicities except through import) + detachcopy: make a copy of a track in", "mix\" - support flac, m4a (basic) - tidy up the menus and shortcuts", "a copy of a track in a group (grouptrack) and replace the current", "in a group (grouptrack) and replace the current link in the group with", "the same name - add track to group - import group name -", "ikona - configuration file ~/.regaudio : currently only one configuration value: location of", "like \"original mix\" - support flac, m4a (basic) - tidy up the menus", "theres a button and menu entry for it + filter: a history of", "reworked the status line, now it provides some basic statistics about the tracks", "- datetime.time sucks for our purposes, store ms and convert them to and", "used - merge: fixed, now the links between groups and tracks dont disappear", "configuration file ~/.regaudio : currently only one configuration value: location of the db", "resorting sorted (remember previous sort and apply it after the new sort) -", "arent selected or marked as new - column sizes for importwindow - import", "option !g: allow query groups (by names), note that its also possible to", "should have some status containing number of tracks that are not yet selected", "fixed the dialog message when deleting a track (not a group track) -", "are not yet selected or as new - import window shouldnt continue after", "group filter: enabling group filter now focuses the filter editline - in groups", "now supported for grouptracks (group mode) too + import: tracks column now displays", "sk - import delete row(s) - import search key -> artist & name", "support tracks in groups too) - star for new (star vs circle?, there", "new for no match, the rest is left as is) - tables: columns", "any parenthesis and their contents (shortcut \") + import: found tracks now marks", "it instead of deletion, filter by it) ''' import sys import os from", "just grouptracks) - ikona - configuration file ~/.regaudio : currently only one configuration", "- NOTE: merging tracks in the same group leads to duplicate grouptracks that", "(and vice versa) - import real searchkey - import mm - import should", "- (any replacement to cfg) v0.1.1: + import: nameCutToSearchKey - make new search", "mode) too + import: tracks column now displays best match indicators + track", "ratings) - adding tracks to a group with the same tracks already present", "group with it + relink: change the grouptrack to link to some other", "(immutable though) - datetime.time sucks for our purposes, store ms and convert them", "name, but remove any parenthesis and their contents (shortcut \") + import: found", "os from PyQt5 import QtWidgets from ui import mainWindow import model.config # this", "groupname) - ok button must be disabled if there are any tracks that", "the menus and shortcuts - (any replacement to cfg) v0.1.1: + import: nameCutToSearchKey", "~/.regaudio : currently only one configuration value: location of the db - total", "application - import: saying no to name cleanup, cleans up the name anyway", "separately - both tables: fixed context menu position v0.1.3: + all tracks: pressing", "menu on tracks (the same as tracks menu) - detele tracks (or group", "import: import file/directory selection crashes the application - import: saying no to name", "promising (immutable though) - datetime.time sucks for our purposes, store ms and 
convert", "**, ++, create new for no match, the rest is left as is)", "difference with rating) - add new tracks to group - delete tracks from", "+ import: * + _ indicators now work by parts, artist and name", "nice to have exact data type before moving on, datetime.time looks promising (immutable", "of pages - import window - disable enter and esc to close the", "- id3v1: id3v1 reader fixed + merge: now allowed in groupmode (maybe it's", "import QtWidgets from ui import mainWindow import model.config # this will read the", "fixed, cue files are expected to be in utf-8 but if they are", "is now possible to directly select multiple cuesheets + import: select best ('B'", "it's confusing) + group delete: deleting a group doesn't reset the group filter", "the group with it + relink: change the grouptrack to link to some", "new flag and set it instead of deletion, filter by it) ''' import", "- import: import file/directory selection crashes the application - import: saying no to", "from track name, but remove any parenthesis and their contents (shortcut \") +", "+ all tracks: pressing all tracks again will return to previous group -", "filetring: !a:artist !n:name !r:rating - groups, group selector, favorites (first in the selector)", "with rating) - add new tracks to group - delete tracks from group", "PyQt5 FIXME: - import: import file/directory selection crashes the application - import: saying", "on tracks (the same as tracks menu) - detele tracks (or group tracks)", "+ relink: change the grouptrack to link to some other similar track v0.1.4:", "to query and intersection of groups + import: * + _ indicators now", "import window - disable enter and esc to close the window - automatically", "filter: new advanced filter option !g: allow query groups (by names), note that", "context menu position v0.1.3: + all tracks: pressing all tracks again will return", "didn't work (it was impossible to make duplicities except through import) + detachcopy:", "are not (on UnicodeDecodeError) latin1 is used - merge: fixed, now the links", "- PyQt4 - SQLAlchemy - SQLite - numpy (diff) Features - filter tracks", "rename group, remove group - merge tracks (and retain links to groups) -", "status containing number of tracks that are not yet selected or as new", "name (and vice versa) - import real searchkey - import mm - import", "merge: now allowed in groupmode (maybe it's confusing) + group delete: deleting a", "on, datetime.time looks promising (immutable though) - datetime.time sucks for our purposes, store", "new - column sizes for importwindow - import cue (gruop) - time units", "are seconds which themselves are inaccurate - it would be nice to have", "menu: groups are now listed alphabetically - id3v1: id3v1 reader fixed + merge:", "store ms and convert them to and from string as needed (done?) -", "alphabetically - id3v1: id3v1 reader fixed + merge: now allowed in groupmode (maybe", "any tracks that arent selected or marked as new - column sizes for", "to and from string as needed (done?) 
- implement for import too -", "artist & name (and vice versa) - import real searchkey - import mm", "import cue (gruop) - time units - cuesheet has frames, everywhere else are", "continue after enter is pressed (a problem with the groupname) - ok button", "multiple cuesheets + import: select best ('B' key) + import: quick resolve (select", "disable enter and esc to close the window - automatically change groupname of", "for no match, the rest is left as is) - tables: columns are", "SQLite - numpy (diff) Features - filter tracks case insensitive, advanced filetring: !a:artist", "(first in the selector) - filter with groups - advanced sorting - resorting", "select existing, as new, sk -> a,n, a,n -> sk - import delete", "tracks (and retain links to groups) - NOTE: merging tracks in the same", "visible difference with rating) - add new tracks to group - delete tracks", "if there are any tracks that arent selected or marked as new -", "now resized properly tanks to wordWrap=False - both tables: removed grid + filter:", "available + filter: new advanced filter option !g: allow query groups (by names),", "now doesn't reset automatically, theres a button and menu entry for it +", "automatically change groupname of import or provide an option to do so -", "now it provides some basic statistics about the tracks (counts and ratings) -", "groups and therefore it is possible to query and intersection of groups +", "in groups menu: groups are now listed alphabetically - id3v1: id3v1 reader fixed", "TODO: - (artist translator to help fix some issues with names) - (import", "in the same group leads to duplicate grouptracks that share the same underlying", "menu position v0.1.3: + all tracks: pressing all tracks again will return to", "(any replacement to cfg) v0.1.1: + import: nameCutToSearchKey - make new search key", "to help fix some issues with names) - (import should do some things", "+ filter: a history of last 20 filter rules is now available +", "left as is) - tables: columns are now resized properly tanks to wordWrap=False", "- automatically change groupname of import or provide an option to do so", "NOTE: merging tracks in the same group leads to duplicate grouptracks that share", "tracks that are not yet selected or as new - import window shouldnt", "the same tracks already present didn't work (it was impossible to make duplicities", "screw up the ordering) + status: reworked the status line, now it provides", "rules is now available + filter: new advanced filter option !g: allow query", "directly select multiple cuesheets + import: select best ('B' key) + import: quick", "reset automatically, theres a button and menu entry for it + filter: a", "are now listed alphabetically - id3v1: id3v1 reader fixed + merge: now allowed", "**, ++ if search strings match - import: fixed, cue files are expected", "some status containing number of tracks that are not yet selected or as", "between groups and all tracks doesn't screw up the ordering) + status: reworked", "height and alternating row colors + import: it is now possible to directly", "importing mp3 without track number + addtogroup: now supported for grouptracks (group mode)", "dialog message when deleting a track (not a group track) - group filter:", "best track for **, ++, create new for no match, the rest is", "replacement to cfg) v0.1.1: + import: nameCutToSearchKey - make new search key from", "+ track and import tables now both have smaller row height and alternating", "with the same tracks already present didn't work (it was impossible to 
make", "currently only one configuration value: location of the db - total number of", "columns are now resized properly tanks to wordWrap=False - both tables: removed grid", "cleanup track names) - safe delete (introduce a new flag and set it", "without track number + addtogroup: now supported for grouptracks (group mode) too +", "tables: removed grid + filter: now doesn't reset automatically, theres a button and", "group filter now focuses the filter editline - in groups menu: groups are", "tracks doesn't screw up the ordering) + status: reworked the status line, now", "track rating in tooltip is now displayed as a set of stars, plus", "from names like \"original mix\" - support flac, m4a (basic) - tidy up", "it provides some basic statistics about the tracks (counts and ratings) - adding", "+ sort: sorting grouptracks by number is remembered between all tracks/groups switches (so", "import: nameCutToSearchKey - make new search key from track name, but remove any", "to do so - striping certain parts from names like \"original mix\" -", "multiple tracks with the same name - add track to group - import", "would be nice to have exact data type before moving on, datetime.time looks", "similar track v0.1.4: - migration to PyQt5 FIXME: - import: import file/directory selection", "context menu on tracks (the same as tracks menu) - detele tracks (or", "+ filter: new advanced filter option !g: allow query groups (by names), note", "Requirements - Python 3 - PyQt4 - SQLAlchemy - SQLite - numpy (diff)", "- in groups menu: groups are now listed alphabetically - id3v1: id3v1 reader", "cleanup, cleans up the name anyway TODO: - (artist translator to help fix", "links between groups and tracks dont disappear v0.1.2: - import: fixed importing mp3", "+ addtogroup: now supported for grouptracks (group mode) too + import: tracks column", "now allowed in groupmode (maybe it's confusing) + group delete: deleting a group", "link in the group with it + relink: change the grouptrack to link", "nameCutToSearchKey - make new search key from track name, but remove any parenthesis", "possible to query and intersection of groups + import: * + _ indicators", "to name cleanup, cleans up the name anyway TODO: - (artist translator to", "in the group with it + relink: change the grouptrack to link to", "tracks from group (not just grouptracks) - ikona - configuration file ~/.regaudio :", "leads to duplicate grouptracks that share the same underlying track; in short, multiple", "circle?, there must be visible difference with rating) - add new tracks to", "delete tracks from group (not just grouptracks) - ikona - configuration file ~/.regaudio", "it is possible to query and intersection of groups + import: * +", "the selector) - filter with groups - advanced sorting - resorting sorted (remember", "group - delete tracks from group (not just grouptracks) - ikona - configuration", "match, the rest is left as is) - tables: columns are now resized", "delete, reset, select existing, as new, sk -> a,n, a,n -> sk -", "a group with the same tracks already present didn't work (it was impossible", "automatically, theres a button and menu entry for it + filter: a history", "- import: saying no to name cleanup, cleans up the name anyway TODO:", "but if they are not (on UnicodeDecodeError) latin1 is used - merge: fixed,", "displays best match indicators + track rating in tooltip is now displayed as", "select best ('B' key) + import: quick resolve (select best track for **,", "convert them to and from string as needed (done?) 
- implement for import", "groups are now listed alphabetically - id3v1: id3v1 reader fixed + merge: now", "for import too - import directory (group) - stars for rating (it should", "+ import: nameCutToSearchKey - make new search key from track name, but remove", "that share the same underlying track; in short, multiple tracks with the same", "provide an option to do so - striping certain parts from names like", "if search strings match - import: fixed, cue files are expected to be", "- import delete row(s) - import search key -> artist & name (and", "(manually), rename group, remove group - merge tracks (and retain links to groups)", "be disabled if there are any tracks that arent selected or marked as", "in groups too) - star for new (star vs circle?, there must be", "import: * + _ indicators now work by parts, artist and name are", "exact data type before moving on, datetime.time looks promising (immutable though) - datetime.time", "automaticaly - cleanup group name, cleanup track names) - safe delete (introduce a", "mm - import should have some status containing number of tracks that are", "- group management - add group (manually), rename group, remove group - merge", "read the config VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show()", "- resorting sorted (remember previous sort and apply it after the new sort)", "star for new (star vs circle?, there must be visible difference with rating)", "+ _ indicators now work by parts, artist and name are evaluated separately", "new (star vs circle?, there must be visible difference with rating) - add", "parts, artist and name are evaluated separately - both tables: fixed context menu", "search key -> artist & name (and vice versa) - import real searchkey", "group name, cleanup track names) - safe delete (introduce a new flag and", "sorting grouptracks by number is remembered between all tracks/groups switches (so switching back", "star **, ++ if search strings match - import: fixed, cue files are", "tracks/groups switches (so switching back and forth between groups and all tracks doesn't", "stars, plus and dashes + track and import tables now both have smaller", "remembered between all tracks/groups switches (so switching back and forth between groups and", "new search key from track name, but remove any parenthesis and their contents", "(introduce a new flag and set it instead of deletion, filter by it)", "- import mm - import should have some status containing number of tracks", "a,n, a,n -> sk - import delete row(s) - import search key ->", "resized properly tanks to wordWrap=False - both tables: removed grid + filter: now", "use it with groups and therefore it is possible to query and intersection", "purposes, store ms and convert them to and from string as needed (done?)", "latin1 is used - merge: fixed, now the links between groups and tracks", "the same underlying track; in short, multiple tracks with the same name -", "else are seconds which themselves are inaccurate - it would be nice to", "some issues with names) - (import should do some things automaticaly - cleanup", "- import: fixed importing mp3 without track number + addtogroup: now supported for", "will return to previous group - delete: fixed the dialog message when deleting", "parenthesis and their contents (shortcut \") + import: found tracks now marks verbatim", "no to name cleanup, cleans up the name anyway TODO: - (artist translator", "now listed alphabetically - id3v1: id3v1 reader fixed + merge: now allowed 
in", "import tables now both have smaller row height and alternating row colors +", "tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating - groups, group selector, favorites", "(remember previous sort and apply it after the new sort) - context menu", "best match indicators + track rating in tooltip is now displayed as a", "id3v1 reader fixed + merge: now allowed in groupmode (maybe it's confusing) +", "disabled if there are any tracks that arent selected or marked as new", "certain parts from names like \"original mix\" - support flac, m4a (basic) -", "config VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show() sys.exit(app.exec_()) if", "rating) - add new tracks to group - delete tracks from group (not", "resolve (select best track for **, ++, create new for no match, the", "do some things automaticaly - cleanup group name, cleanup track names) - safe", "confusing) + group delete: deleting a group doesn't reset the group filter +", "be visible difference with rating) - add new tracks to group - delete", "+ status: reworked the status line, now it provides some basic statistics about", "tracks (counts and ratings) - adding tracks to a group with the same", "by number is remembered between all tracks/groups switches (so switching back and forth", "cleans up the name anyway TODO: - (artist translator to help fix some", "Features - filter tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating - groups,", "group, remove group - merge tracks (and retain links to groups) - NOTE:", "set it instead of deletion, filter by it) ''' import sys import os", "the ordering) + status: reworked the status line, now it provides some basic", "the config VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show() sys.exit(app.exec_())", "make duplicities except through import) + detachcopy: make a copy of a track", "import search key -> artist & name (and vice versa) - import real", "are now resized properly tanks to wordWrap=False - both tables: removed grid +", "time units - cuesheet has frames, everywhere else are seconds which themselves are", "- implement for import too - import directory (group) - stars for rating", "and their contents (shortcut \") + import: found tracks now marks verbatim matches", "- context menu on tracks (the same as tracks menu) - detele tracks", "shortcuts - (any replacement to cfg) v0.1.1: + import: nameCutToSearchKey - make new", "value: location of the db - total number of pages - import window", "+ track rating in tooltip is now displayed as a set of stars,", "(it was impossible to make duplicities except through import) + detachcopy: make a", "tanks to wordWrap=False - both tables: removed grid + filter: now doesn't reset", "context menu: delete, reset, select existing, as new, sk -> a,n, a,n ->", "sort: sorting grouptracks by number is remembered between all tracks/groups switches (so switching", "of groups + import: * + _ indicators now work by parts, artist", "delete row(s) - import search key -> artist & name (and vice versa)", "have exact data type before moving on, datetime.time looks promising (immutable though) -", "have smaller row height and alternating row colors + import: it is now", "history of last 20 filter rules is now available + filter: new advanced", "are evaluated separately - both tables: fixed context menu position v0.1.3: + all", "- tidy up the menus and shortcuts - (any replacement to cfg) v0.1.1:", "ui 
import mainWindow import model.config # this will read the config VERSION =", "and menu entry for it + filter: a history of last 20 filter", "name are evaluated separately - both tables: fixed context menu position v0.1.3: +", "same tracks already present didn't work (it was impossible to make duplicities except", "about the tracks (counts and ratings) - adding tracks to a group with", "for importwindow - import cue (gruop) - time units - cuesheet has frames,", "therefore it is possible to query and intersection of groups + import: *", "tracks column now displays best match indicators + track rating in tooltip is", "detachcopy: make a copy of a track in a group (grouptrack) and replace", "and set it instead of deletion, filter by it) ''' import sys import", "same underlying track; in short, multiple tracks with the same name - add", "(and retain links to groups) - NOTE: merging tracks in the same group", "entry for it + filter: a history of last 20 filter rules is", "a group (grouptrack) and replace the current link in the group with it", "- SQLAlchemy - SQLite - numpy (diff) Features - filter tracks case insensitive,", "basic statistics about the tracks (counts and ratings) - adding tracks to a", "(by names), note that its also possible to use it with groups and", "apply it after the new sort) - context menu on tracks (the same", "!g: allow query groups (by names), note that its also possible to use", "track names) - safe delete (introduce a new flag and set it instead", "switches (so switching back and forth between groups and all tracks doesn't screw", "(artist translator to help fix some issues with names) - (import should do", "- cuesheet has frames, everywhere else are seconds which themselves are inaccurate -", "as needed (done?) - implement for import too - import directory (group) -", "(basic) - tidy up the menus and shortcuts - (any replacement to cfg)", "(it should support tracks in groups too) - star for new (star vs", "tracks with the same name - add track to group - import group", "rating (it should support tracks in groups too) - star for new (star", "and import tables now both have smaller row height and alternating row colors", "-> sk - import delete row(s) - import search key -> artist &", "pressing all tracks again will return to previous group - delete: fixed the", "v0.1.4: - migration to PyQt5 FIXME: - import: import file/directory selection crashes the", "the db - total number of pages - import window - disable enter", "not (on UnicodeDecodeError) latin1 is used - merge: fixed, now the links between", "one configuration value: location of the db - total number of pages -", "of a track in a group (grouptrack) and replace the current link in", "from string as needed (done?) 
- implement for import too - import directory", "there must be visible difference with rating) - add new tracks to group", "in groupmode (maybe it's confusing) + group delete: deleting a group doesn't reset", "versa) - import real searchkey - import mm - import should have some", "a new flag and set it instead of deletion, filter by it) '''", "but remove any parenthesis and their contents (shortcut \") + import: found tracks", "for it + filter: a history of last 20 filter rules is now", "which themselves are inaccurate - it would be nice to have exact data", "to link to some other similar track v0.1.4: - migration to PyQt5 FIXME:", "import context menu: delete, reset, select existing, as new, sk -> a,n, a,n", "now displayed as a set of stars, plus and dashes + track and", "the current link in the group with it + relink: change the grouptrack", "SQLAlchemy - SQLite - numpy (diff) Features - filter tracks case insensitive, advanced", "group (not just grouptracks) - ikona - configuration file ~/.regaudio : currently only", "(import should do some things automaticaly - cleanup group name, cleanup track names)", "after the new sort) - context menu on tracks (the same as tracks", "+ filter: now doesn't reset automatically, theres a button and menu entry for", "dont disappear v0.1.2: - import: fixed importing mp3 without track number + addtogroup:", "utf-8 but if they are not (on UnicodeDecodeError) latin1 is used - merge:", "position v0.1.3: + all tracks: pressing all tracks again will return to previous", "track; in short, multiple tracks with the same name - add track to", "- ikona - configuration file ~/.regaudio : currently only one configuration value: location", "track to group - import group name - import context menu: delete, reset,", "and alternating row colors + import: it is now possible to directly select", "our purposes, store ms and convert them to and from string as needed", "needed (done?) 
- implement for import too - import directory (group) - stars", "its also possible to use it with groups and therefore it is possible", "change the grouptrack to link to some other similar track v0.1.4: - migration", "tidy up the menus and shortcuts - (any replacement to cfg) v0.1.1: +", "deleting a group doesn't reset the group filter + sort: sorting grouptracks by", "quick resolve (select best track for **, ++, create new for no match,", "(group mode) too + import: tracks column now displays best match indicators +", "!n:name !r:rating - groups, group selector, favorites (first in the selector) - filter", "are inaccurate - it would be nice to have exact data type before", "filter tracks case insensitive, advanced filetring: !a:artist !n:name !r:rating - groups, group selector,", "groups - advanced sorting - resorting sorted (remember previous sort and apply it", "+ detachcopy: make a copy of a track in a group (grouptrack) and", "menu entry for it + filter: a history of last 20 filter rules", "group selector, favorites (first in the selector) - filter with groups - advanced", "Python 3 - PyQt4 - SQLAlchemy - SQLite - numpy (diff) Features -", "import model.config # this will read the config VERSION = 0.1.4 def main():", "tracks to a group with the same tracks already present didn't work (it", "to make duplicities except through import) + detachcopy: make a copy of a", "VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show() sys.exit(app.exec_()) if __name__", "grouptracks) - ikona - configuration file ~/.regaudio : currently only one configuration value:", "track (not a group track) - group filter: enabling group filter now focuses", "things automaticaly - cleanup group name, cleanup track names) - safe delete (introduce", "in short, multiple tracks with the same name - add track to group", "crashes the application - import: saying no to name cleanup, cleans up the", "- import search key -> artist & name (and vice versa) - import", "tracks now marks verbatim matches with a star **, ++ if search strings", "- column sizes for importwindow - import cue (gruop) - time units -", "possible to directly select multiple cuesheets + import: select best ('B' key) +", "an option to do so - striping certain parts from names like \"original", "indicators now work by parts, artist and name are evaluated separately - both", "already present didn't work (it was impossible to make duplicities except through import)", "it after the new sort) - context menu on tracks (the same as", "message when deleting a track (not a group track) - group filter: enabling", "and therefore it is possible to query and intersection of groups + import:", "name anyway TODO: - (artist translator to help fix some issues with names)", "note that its also possible to use it with groups and therefore it", "cleanup group name, cleanup track names) - safe delete (introduce a new flag", "to use it with groups and therefore it is possible to query and", "doesn't screw up the ordering) + status: reworked the status line, now it", "datetime.time looks promising (immutable though) - datetime.time sucks for our purposes, store ms", "+ merge: now allowed in groupmode (maybe it's confusing) + group delete: deleting", "row(s) - import search key -> artist & name (and vice versa) -", "directory (group) - stars for rating (it should support tracks in groups too)", "- support flac, m4a (basic) - tidy up the menus and shortcuts -", "reset the group filter + sort: sorting grouptracks by number 
is remembered between", "selector, favorites (first in the selector) - filter with groups - advanced sorting", "query groups (by names), note that its also possible to use it with", "v0.1.1: + import: nameCutToSearchKey - make new search key from track name, but", "- import: fixed, cue files are expected to be in utf-8 but if", "both tables: removed grid + filter: now doesn't reset automatically, theres a button", "relink: change the grouptrack to link to some other similar track v0.1.4: -", "too) - star for new (star vs circle?, there must be visible difference", "to group - import group name - import context menu: delete, reset, select", "for grouptracks (group mode) too + import: tracks column now displays best match", "+ group delete: deleting a group doesn't reset the group filter + sort:", "remove group - merge tracks (and retain links to groups) - NOTE: merging", "they are not (on UnicodeDecodeError) latin1 is used - merge: fixed, now the", "is now displayed as a set of stars, plus and dashes + track", "number + addtogroup: now supported for grouptracks (group mode) too + import: tracks", "grouptracks that share the same underlying track; in short, multiple tracks with the", "real searchkey - import mm - import should have some status containing number", "short, multiple tracks with the same name - add track to group -", "2012 @author: <NAME>, <EMAIL> Requirements - Python 3 - PyQt4 - SQLAlchemy -", "selector) - filter with groups - advanced sorting - resorting sorted (remember previous", "add track to group - import group name - import context menu: delete,", "some basic statistics about the tracks (counts and ratings) - adding tracks to", "column now displays best match indicators + track rating in tooltip is now", "when deleting a track (not a group track) - group filter: enabling group", "the rest is left as is) - tables: columns are now resized properly", "disappear v0.1.2: - import: fixed importing mp3 without track number + addtogroup: now", "PyQt4 - SQLAlchemy - SQLite - numpy (diff) Features - filter tracks case", "filter + sort: sorting grouptracks by number is remembered between all tracks/groups switches", "matches with a star **, ++ if search strings match - import: fixed,", "all tracks doesn't screw up the ordering) + status: reworked the status line,", "selection crashes the application - import: saying no to name cleanup, cleans up", "window shouldnt continue after enter is pressed (a problem with the groupname) -", "- import directory (group) - stars for rating (it should support tracks in", "both tables: fixed context menu position v0.1.3: + all tracks: pressing all tracks", "present didn't work (it was impossible to make duplicities except through import) +", "listed alphabetically - id3v1: id3v1 reader fixed + merge: now allowed in groupmode", "a track (not a group track) - group filter: enabling group filter now", "(not just grouptracks) - ikona - configuration file ~/.regaudio : currently only one", "import: tracks column now displays best match indicators + track rating in tooltip", "them to and from string as needed (done?) 
- implement for import too", "must be disabled if there are any tracks that arent selected or marked", "files are expected to be in utf-8 but if they are not (on", "('B' key) + import: quick resolve (select best track for **, ++, create", "tracks) - group management - add group (manually), rename group, remove group -", "new, sk -> a,n, a,n -> sk - import delete row(s) - import", "to previous group - delete: fixed the dialog message when deleting a track", "as new, sk -> a,n, a,n -> sk - import delete row(s) -", "tables now both have smaller row height and alternating row colors + import:", "the filter editline - in groups menu: groups are now listed alphabetically -", "tracks in groups too) - star for new (star vs circle?, there must", "sucks for our purposes, store ms and convert them to and from string", "filter editline - in groups menu: groups are now listed alphabetically - id3v1:", "ok button must be disabled if there are any tracks that arent selected", "group - delete: fixed the dialog message when deleting a track (not a", "column sizes for importwindow - import cue (gruop) - time units - cuesheet", "import mainWindow import model.config # this will read the config VERSION = 0.1.4", "python ''' Created on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements - Python", "group (manually), rename group, remove group - merge tracks (and retain links to", "key from track name, but remove any parenthesis and their contents (shortcut \")", "in tooltip is now displayed as a set of stars, plus and dashes", "group delete: deleting a group doesn't reset the group filter + sort: sorting", "name cleanup, cleans up the name anyway TODO: - (artist translator to help", "- it would be nice to have exact data type before moving on,", "-> a,n, a,n -> sk - import delete row(s) - import search key", "option to do so - striping certain parts from names like \"original mix\"", "- ok button must be disabled if there are any tracks that arent", "rest is left as is) - tables: columns are now resized properly tanks", "configuration value: location of the db - total number of pages - import", "the status line, now it provides some basic statistics about the tracks (counts", "(not a group track) - group filter: enabling group filter now focuses the", "<NAME>, <EMAIL> Requirements - Python 3 - PyQt4 - SQLAlchemy - SQLite -", "and name are evaluated separately - both tables: fixed context menu position v0.1.3:", "if they are not (on UnicodeDecodeError) latin1 is used - merge: fixed, now", "a button and menu entry for it + filter: a history of last", "file ~/.regaudio : currently only one configuration value: location of the db -", "!r:rating - groups, group selector, favorites (first in the selector) - filter with", "containing number of tracks that are not yet selected or as new -", "filter: enabling group filter now focuses the filter editline - in groups menu:", "number of tracks that are not yet selected or as new - import", "* + _ indicators now work by parts, artist and name are evaluated", "the name anyway TODO: - (artist translator to help fix some issues with", "strings match - import: fixed, cue files are expected to be in utf-8", "everywhere else are seconds which themselves are inaccurate - it would be nice", "the tracks (counts and ratings) - adding tracks to a group with the", "ordering) + status: reworked the status line, now it provides some basic statistics", "import file/directory selection crashes the application - import: saying no to name cleanup,", "(diff) Features - filter tracks case 
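# The "time units" item in the feature list notes that cue sheets measure time in
# frames (a CD frame is 1/75 of a second) while the rest of the app uses seconds,
# and that the chosen fix is to store milliseconds and convert to and from strings
# as needed. Below is a sketch of those conversions, not regaudio's actual
# implementation; the function names are illustrative.

CUE_FRAMES_PER_SECOND = 75  # CD cue sheets address time as mm:ss:ff, 75 frames per second


def cue_index_to_ms(minutes, seconds, frames):
    # Convert a cue INDEX timestamp (mm:ss:ff) to integer milliseconds.
    return (minutes * 60 + seconds) * 1000 + round(frames * 1000 / CUE_FRAMES_PER_SECOND)


def ms_to_string(ms):
    # Render stored milliseconds as "m:ss" for display.
    minutes, rest = divmod(int(ms), 60000)
    return "%d:%02d" % (minutes, rest // 1000)


def string_to_ms(text):
    # Parse "m:ss" back into milliseconds.
    minutes, seconds = text.split(":")
    return (int(minutes) * 60 + int(seconds)) * 1000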
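# v0.1.1's nameCutToSearchKey is described as "make a new search key from the track
# name, but remove any parentheses and their contents". The real implementation
# isn't shown in the changelog; this is a one-regex sketch of that behaviour,
# which also covers the "original mix"-style suffixes the TODO list mentions.
import re


def name_cut_to_search_key(name):
    # Drop every "(...)" group, then collapse the leftover whitespace:
    # "Track (Original Mix)" -> "track"
    stripped = re.sub(r"\([^)]*\)", "", name)
    return " ".join(stripped.split()).lower()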
work by", "inaccurate - it would be nice to have exact data type before moving", "reader fixed + merge: now allowed in groupmode (maybe it's confusing) + group", "favorites (first in the selector) - filter with groups - advanced sorting -", "- detele tracks (or group tracks) - group management - add group (manually),", "back and forth between groups and all tracks doesn't screw up the ordering)", "should support tracks in groups too) - star for new (star vs circle?,", "group doesn't reset the group filter + sort: sorting grouptracks by number is", "_ indicators now work by parts, artist and name are evaluated separately -", "button and menu entry for it + filter: a history of last 20", "artist and name are evaluated separately - both tables: fixed context menu position", "to some other similar track v0.1.4: - migration to PyQt5 FIXME: - import:", "fixed context menu position v0.1.3: + all tracks: pressing all tracks again will", "- make new search key from track name, but remove any parenthesis and", "to be in utf-8 but if they are not (on UnicodeDecodeError) latin1 is", "group - import group name - import context menu: delete, reset, select existing,", "underlying track; in short, multiple tracks with the same name - add track", "that are not yet selected or as new - import window shouldnt continue", "provides some basic statistics about the tracks (counts and ratings) - adding tracks", "be in utf-8 but if they are not (on UnicodeDecodeError) latin1 is used", "- add group (manually), rename group, remove group - merge tracks (and retain", "& name (and vice versa) - import real searchkey - import mm -", "verbatim matches with a star **, ++ if search strings match - import:", "from PyQt5 import QtWidgets from ui import mainWindow import model.config # this will", "of the db - total number of pages - import window - disable", "removed grid + filter: now doesn't reset automatically, theres a button and menu", "a group doesn't reset the group filter + sort: sorting grouptracks by number", "0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show() sys.exit(app.exec_()) if __name__ == '__main__':", "group - merge tracks (and retain links to groups) - NOTE: merging tracks", "fixed + merge: now allowed in groupmode (maybe it's confusing) + group delete:", "replace the current link in the group with it + relink: change the", "reset, select existing, as new, sk -> a,n, a,n -> sk - import", "- delete tracks from group (not just grouptracks) - ikona - configuration file", "import: quick resolve (select best track for **, ++, create new for no", "impossible to make duplicities except through import) + detachcopy: make a copy of", "= 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow() window.show() sys.exit(app.exec_()) if __name__ ==", "merge: fixed, now the links between groups and tracks dont disappear v0.1.2: -", "- both tables: fixed context menu position v0.1.3: + all tracks: pressing all", "+ import: found tracks now marks verbatim matches with a star **, ++", "20 filter rules is now available + filter: new advanced filter option !g:", "existing, as new, sk -> a,n, a,n -> sk - import delete row(s)", "from ui import mainWindow import model.config # this will read the config VERSION", "grouptracks by number is remembered between all tracks/groups switches (so switching back and", "looks promising (immutable though) - datetime.time sucks for our purposes, store ms and", "enabling group filter now focuses the filter 
editline - in groups menu: groups", "db - total number of pages - import window - disable enter and", "links to groups) - NOTE: merging tracks in the same group leads to", "- add new tracks to group - delete tracks from group (not just", "track in a group (grouptrack) and replace the current link in the group", "insensitive, advanced filetring: !a:artist !n:name !r:rating - groups, group selector, favorites (first in", "number is remembered between all tracks/groups switches (so switching back and forth between", "copy of a track in a group (grouptrack) and replace the current link", "esc to close the window - automatically change groupname of import or provide", "rating in tooltip is now displayed as a set of stars, plus and", "it would be nice to have exact data type before moving on, datetime.time", "previous group - delete: fixed the dialog message when deleting a track (not", "remove any parenthesis and their contents (shortcut \") + import: found tracks now", "filter with groups - advanced sorting - resorting sorted (remember previous sort and", "tracks (or group tracks) - group management - add group (manually), rename group,", "- merge tracks (and retain links to groups) - NOTE: merging tracks in", "- add track to group - import group name - import context menu:", "forth between groups and all tracks doesn't screw up the ordering) + status:", "was impossible to make duplicities except through import) + detachcopy: make a copy", "as new - import window shouldnt continue after enter is pressed (a problem", "groups menu: groups are now listed alphabetically - id3v1: id3v1 reader fixed +", "groups and all tracks doesn't screw up the ordering) + status: reworked the", "\") + import: found tracks now marks verbatim matches with a star **,", "tracks menu) - detele tracks (or group tracks) - group management - add", "to group - delete tracks from group (not just grouptracks) - ikona -", "import: fixed importing mp3 without track number + addtogroup: now supported for grouptracks", "too - import directory (group) - stars for rating (it should support tracks", "set of stars, plus and dashes + track and import tables now both", "+ import: tracks column now displays best match indicators + track rating in", "same name - add track to group - import group name - import", "up the ordering) + status: reworked the status line, now it provides some", "sorted (remember previous sort and apply it after the new sort) - context", "Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements - Python 3 - PyQt4 -", "also possible to use it with groups and therefore it is possible to", "(star vs circle?, there must be visible difference with rating) - add new", "+ import: it is now possible to directly select multiple cuesheets + import:", "with it + relink: change the grouptrack to link to some other similar", "the grouptrack to link to some other similar track v0.1.4: - migration to", "problem with the groupname) - ok button must be disabled if there are", "the window - automatically change groupname of import or provide an option to", "filter now focuses the filter editline - in groups menu: groups are now", "- cleanup group name, cleanup track names) - safe delete (introduce a new", "sk -> a,n, a,n -> sk - import delete row(s) - import search", "++ if search strings match - import: fixed, cue files are expected to", "share the same underlying track; in short, multiple tracks with the same name", "- total number of pages - import window - disable enter and esc", "not yet selected or as new - import window shouldnt 
continue after enter", "for **, ++, create new for no match, the rest is left as", "- safe delete (introduce a new flag and set it instead of deletion,", "to a group with the same tracks already present didn't work (it was", "duplicate grouptracks that share the same underlying track; in short, multiple tracks with", "up the menus and shortcuts - (any replacement to cfg) v0.1.1: + import:", "delete: fixed the dialog message when deleting a track (not a group track)", "8, 2012 @author: <NAME>, <EMAIL> Requirements - Python 3 - PyQt4 - SQLAlchemy", "are any tracks that arent selected or marked as new - column sizes", "of stars, plus and dashes + track and import tables now both have", "PyQt5 import QtWidgets from ui import mainWindow import model.config # this will read", "to wordWrap=False - both tables: removed grid + filter: now doesn't reset automatically,", "seconds which themselves are inaccurate - it would be nice to have exact", "vs circle?, there must be visible difference with rating) - add new tracks", "fix some issues with names) - (import should do some things automaticaly -", "!a:artist !n:name !r:rating - groups, group selector, favorites (first in the selector) -", "group name - import context menu: delete, reset, select existing, as new, sk", "<filename>regaudio.py #!/usr/bin/env python ''' Created on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements", "vice versa) - import real searchkey - import mm - import should have", "in the selector) - filter with groups - advanced sorting - resorting sorted", "a history of last 20 filter rules is now available + filter: new", "<EMAIL> Requirements - Python 3 - PyQt4 - SQLAlchemy - SQLite - numpy", "group management - add group (manually), rename group, remove group - merge tracks", ": currently only one configuration value: location of the db - total number", "or provide an option to do so - striping certain parts from names", "(the same as tracks menu) - detele tracks (or group tracks) - group", "key -> artist & name (and vice versa) - import real searchkey -", "advanced filter option !g: allow query groups (by names), note that its also", "m4a (basic) - tidy up the menus and shortcuts - (any replacement to", "all tracks/groups switches (so switching back and forth between groups and all tracks", "v0.1.2: - import: fixed importing mp3 without track number + addtogroup: now supported", "import should have some status containing number of tracks that are not yet", "names) - (import should do some things automaticaly - cleanup group name, cleanup", "@author: <NAME>, <EMAIL> Requirements - Python 3 - PyQt4 - SQLAlchemy - SQLite", "from group (not just grouptracks) - ikona - configuration file ~/.regaudio : currently", "doesn't reset the group filter + sort: sorting grouptracks by number is remembered", "the application - import: saying no to name cleanup, cleans up the name", "expected to be in utf-8 but if they are not (on UnicodeDecodeError) latin1", "import mm - import should have some status containing number of tracks that", "case insensitive, advanced filetring: !a:artist !n:name !r:rating - groups, group selector, favorites (first", "and dashes + track and import tables now both have smaller row height", "select multiple cuesheets + import: select best ('B' key) + import: quick resolve", "group filter + sort: sorting grouptracks by number is remembered between all tracks/groups", "evaluated separately - both tables: fixed context menu position v0.1.3: + all tracks:", "to PyQt5 FIXME: - import: import file/directory selection 
crashes the application - import:", "have some status containing number of tracks that are not yet selected or", "with a star **, ++ if search strings match - import: fixed, cue", "is remembered between all tracks/groups switches (so switching back and forth between groups", "management - add group (manually), rename group, remove group - merge tracks (and", "support flac, m4a (basic) - tidy up the menus and shortcuts - (any", "- groups, group selector, favorites (first in the selector) - filter with groups", "query and intersection of groups + import: * + _ indicators now work", "marked as new - column sizes for importwindow - import cue (gruop) -", "smaller row height and alternating row colors + import: it is now possible", "by parts, artist and name are evaluated separately - both tables: fixed context", "current link in the group with it + relink: change the grouptrack to", "tracks that arent selected or marked as new - column sizes for importwindow", "names), note that its also possible to use it with groups and therefore", "to duplicate grouptracks that share the same underlying track; in short, multiple tracks", "and forth between groups and all tracks doesn't screw up the ordering) +", "dashes + track and import tables now both have smaller row height and", "with groups and therefore it is possible to query and intersection of groups", "tracks (the same as tracks menu) - detele tracks (or group tracks) -", "import) + detachcopy: make a copy of a track in a group (grouptrack)", "marks verbatim matches with a star **, ++ if search strings match -", "group with the same tracks already present didn't work (it was impossible to", "migration to PyQt5 FIXME: - import: import file/directory selection crashes the application -", "possible to use it with groups and therefore it is possible to query", "again will return to previous group - delete: fixed the dialog message when", "on Mar 8, 2012 @author: <NAME>, <EMAIL> Requirements - Python 3 - PyQt4", "mainWindow import model.config # this will read the config VERSION = 0.1.4 def", "as is) - tables: columns are now resized properly tanks to wordWrap=False -", "row height and alternating row colors + import: it is now possible to", "- adding tracks to a group with the same tracks already present didn't", "- import context menu: delete, reset, select existing, as new, sk -> a,n,", "it is now possible to directly select multiple cuesheets + import: select best", "number of pages - import window - disable enter and esc to close", "advanced sorting - resorting sorted (remember previous sort and apply it after the", "- both tables: removed grid + filter: now doesn't reset automatically, theres a", "window - disable enter and esc to close the window - automatically change", "that arent selected or marked as new - column sizes for importwindow -", "location of the db - total number of pages - import window -", "import: fixed, cue files are expected to be in utf-8 but if they", "groups, group selector, favorites (first in the selector) - filter with groups -", "key) + import: quick resolve (select best track for **, ++, create new", "FIXME: - import: import file/directory selection crashes the application - import: saying no", "and all tracks doesn't screw up the ordering) + status: reworked the status", "- import group name - import context menu: delete, reset, select existing, as", "frames, everywhere else are seconds which themselves are inaccurate - it would be", "of import or provide an option to do so - striping certain parts", "- disable 
enter and esc to close the window - automatically change groupname", "- time units - cuesheet has frames, everywhere else are seconds which themselves", "as new - column sizes for importwindow - import cue (gruop) - time", "''' import sys import os from PyQt5 import QtWidgets from ui import mainWindow", "- import cue (gruop) - time units - cuesheet has frames, everywhere else", "import group name - import context menu: delete, reset, select existing, as new,", "a star **, ++ if search strings match - import: fixed, cue files", "- import window shouldnt continue after enter is pressed (a problem with the", "except through import) + detachcopy: make a copy of a track in a", "too + import: tracks column now displays best match indicators + track rating", "create new for no match, the rest is left as is) - tables:", "- tables: columns are now resized properly tanks to wordWrap=False - both tables:", "or as new - import window shouldnt continue after enter is pressed (a", "allow query groups (by names), note that its also possible to use it", "data type before moving on, datetime.time looks promising (immutable though) - datetime.time sucks", "allowed in groupmode (maybe it's confusing) + group delete: deleting a group doesn't", "delete (introduce a new flag and set it instead of deletion, filter by", "for new (star vs circle?, there must be visible difference with rating) -", "to have exact data type before moving on, datetime.time looks promising (immutable though)", "(select best track for **, ++, create new for no match, the rest", "previous sort and apply it after the new sort) - context menu on", "grouptracks (group mode) too + import: tracks column now displays best match indicators", "now both have smaller row height and alternating row colors + import: it", "should do some things automaticaly - cleanup group name, cleanup track names) -", "now work by parts, artist and name are evaluated separately - both tables:", "contents (shortcut \") + import: found tracks now marks verbatim matches with a", "- advanced sorting - resorting sorted (remember previous sort and apply it after", "anyway TODO: - (artist translator to help fix some issues with names) -", "with names) - (import should do some things automaticaly - cleanup group name,", "with the same name - add track to group - import group name", "track and import tables now both have smaller row height and alternating row", "to groups) - NOTE: merging tracks in the same group leads to duplicate", "and replace the current link in the group with it + relink: change", "through import) + detachcopy: make a copy of a track in a group", "deletion, filter by it) ''' import sys import os from PyQt5 import QtWidgets", "names) - safe delete (introduce a new flag and set it instead of", "close the window - automatically change groupname of import or provide an option", "grid + filter: now doesn't reset automatically, theres a button and menu entry", "- (artist translator to help fix some issues with names) - (import should", "now marks verbatim matches with a star **, ++ if search strings match", "sys import os from PyQt5 import QtWidgets from ui import mainWindow import model.config", "between all tracks/groups switches (so switching back and forth between groups and all", "import or provide an option to do so - striping certain parts from", "themselves are inaccurate - it would be nice to have exact data type", "- import should have some status containing number of tracks that are not", "the links between groups and tracks dont disappear v0.1.2: 
- import: fixed importing", "supported for grouptracks (group mode) too + import: tracks column now displays best", "is possible to query and intersection of groups + import: * + _", "track) - group filter: enabling group filter now focuses the filter editline -", "switching back and forth between groups and all tracks doesn't screw up the", "grouptrack to link to some other similar track v0.1.4: - migration to PyQt5", "- migration to PyQt5 FIXME: - import: import file/directory selection crashes the application", "status: reworked the status line, now it provides some basic statistics about the", "- configuration file ~/.regaudio : currently only one configuration value: location of the", "and apply it after the new sort) - context menu on tracks (the", "search strings match - import: fixed, cue files are expected to be in", "there are any tracks that arent selected or marked as new - column", "all tracks again will return to previous group - delete: fixed the dialog", "groups and tracks dont disappear v0.1.2: - import: fixed importing mp3 without track", "filter: a history of last 20 filter rules is now available + filter:", "to directly select multiple cuesheets + import: select best ('B' key) + import:", "(done?) - implement for import too - import directory (group) - stars for", "searchkey - import mm - import should have some status containing number of", "\"original mix\" - support flac, m4a (basic) - tidy up the menus and", "is) - tables: columns are now resized properly tanks to wordWrap=False - both", "the new sort) - context menu on tracks (the same as tracks menu)", "wordWrap=False - both tables: removed grid + filter: now doesn't reset automatically, theres", "shouldnt continue after enter is pressed (a problem with the groupname) - ok", "- striping certain parts from names like \"original mix\" - support flac, m4a", "advanced filetring: !a:artist !n:name !r:rating - groups, group selector, favorites (first in the", "import: saying no to name cleanup, cleans up the name anyway TODO: -", "(shortcut \") + import: found tracks now marks verbatim matches with a star", "- merge: fixed, now the links between groups and tracks dont disappear v0.1.2:", "is left as is) - tables: columns are now resized properly tanks to", "a,n -> sk - import delete row(s) - import search key -> artist", "match indicators + track rating in tooltip is now displayed as a set", "groups + import: * + _ indicators now work by parts, artist and", "with groups - advanced sorting - resorting sorted (remember previous sort and apply", "track v0.1.4: - migration to PyQt5 FIXME: - import: import file/directory selection crashes", "after enter is pressed (a problem with the groupname) - ok button must", "stars for rating (it should support tracks in groups too) - star for", "help fix some issues with names) - (import should do some things automaticaly", "though) - datetime.time sucks for our purposes, store ms and convert them to", "will read the config VERSION = 0.1.4 def main(): app = QtWidgets.QApplication(sys.argv) window=mainWindow.MainWindow()", "window - automatically change groupname of import or provide an option to do", "- Python 3 - PyQt4 - SQLAlchemy - SQLite - numpy (diff) Features", "fixed, now the links between groups and tracks dont disappear v0.1.2: - import:", "adding tracks to a group with the same tracks already present didn't work", "+ import: select best ('B' key) + import: quick resolve (select best track", "with the groupname) - ok button must be disabled if there are any", "tracks in the 
same group leads to duplicate grouptracks that share the same", "is used - merge: fixed, now the links between groups and tracks dont" ]
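The advanced filter syntax listed above (!a:artist, !n:name, !r:rating, and the later !g: group query) is only described in the changelog, never shown as code. Below is a minimal sketch of how such a query string could be split into per-field terms, assuming whitespace-separated tokens; parse_filter and FILTER_PREFIXES are hypothetical names for illustration, not regaudio's actual parser, which may tokenize differently.

import re

# Hypothetical mapping from the one-letter prefixes to track fields.
FILTER_PREFIXES = {'a': 'artist', 'n': 'name', 'r': 'rating', 'g': 'group'}

def parse_filter(query):
    # Split a query such as "!a:daft !n:around !r:4 live" into per-field
    # terms plus leftover free text; case-insensitive matching would be
    # applied downstream, as the feature list notes.
    fields, free_text = {}, []
    for token in query.split():
        m = re.match(r'!([angr]):(.+)$', token)
        if m:
            fields[FILTER_PREFIXES[m.group(1)]] = m.group(2)
        else:
            free_text.append(token)
    return fields, free_text

# parse_filter("!a:daft !n:around !r:4 live")
# -> ({'artist': 'daft', 'name': 'around', 'rating': '4'}, ['live'])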
[ "line.replace('\\n','') if word not in entity: entity[word] = 0 entity[word] = entity[word] +", "word not in entity: entity[word] = 0 entity[word] = entity[word] + 1 entity_freq", "\"r\", encoding=\"utf-8\") as f: for line in f: word = line.replace('\\n','') if word", "entity[word] = 0 entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\")", "+ 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f:", "not in entity: entity[word] = 0 entity[word] = entity[word] + 1 with open(\"../output/analysis", "word = line.replace('\\n','') if word not in entity: entity[word] = 0 entity[word] =", "output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word = line.replace('\\n','') if", "in f: word = line.replace('\\n','') if word not in entity: entity[word] = 0", "{} with open(\"../output/analysis twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f:", "entity = {} with open(\"../output/analysis twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line", "if word not in entity: entity[word] = 0 entity[word] = entity[word] + 1", "with open(\"../output/analysis twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word", "entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in", "1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word", "0 entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for", "+ 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key in entity: entity_freq.write(key+','+str(entity[key])+'\\n')", "= entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line", "1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key in entity: entity_freq.write(key+','+str(entity[key])+'\\n') entity_freq.close()", "with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word =", "= 0 entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as", "0 entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f:", "for line in f: word = line.replace('\\n','') if word not in entity: entity[word]", "word not in entity: entity[word] = 0 entity[word] = entity[word] + 1 with", "entity: entity[word] = 0 entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\",", "= entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key in", "f: for line in f: word = line.replace('\\n','') if word not in entity:", "twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word = line.replace('\\n','')", "entity[word] = 0 entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\")", "= 0 entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n')", "not in entity: entity[word] = 0 entity[word] = entity[word] 
+ 1 entity_freq =", "in entity: entity[word] = 0 entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\",", "f: word = line.replace('\\n','') if word not in entity: entity[word] = 0 entity[word]", "entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for", "entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key in entity: entity_freq.write(key+','+str(entity[key])+'\\n') entity_freq.close() print(entity)", "open(\"../output/analysis twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word =", "as f: for line in f: word = line.replace('\\n','') if word not in", "entity: entity[word] = 0 entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\",", "= line.replace('\\n','') if word not in entity: entity[word] = 0 entity[word] = entity[word]", "= {} with open(\"../output/analysis twitter output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in", "entity[word] = entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key", "open(\"../output/analysis output/entity.txt\", \"r\", encoding=\"utf-8\") as f: for line in f: word = line.replace('\\n','')", "line in f: word = line.replace('\\n','') if word not in entity: entity[word] =", "encoding=\"utf-8\") as f: for line in f: word = line.replace('\\n','') if word not", "entity[word] + 1 entity_freq = open(\"../output/entity_frequency.csv\", \"w\", encoding=\"utf-8\") entity_freq.write('entity,frequency\\n') for key in entity:", "in entity: entity[word] = 0 entity[word] = entity[word] + 1 with open(\"../output/analysis output/entity.txt\"," ]
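The counting loop above works, but concatenating key + ',' + count by hand produces a malformed CSV row for any entity that itself contains a comma. A stdlib-only sketch of the same aggregation over the same three paths, using collections.Counter and the csv module, which quotes such values:

import csv
from collections import Counter

entity = Counter()
for path in ("../output/analysis twitter output/entity.txt",
             "../output/analysis output/entity.txt"):
    with open(path, encoding="utf-8") as f:
        # Counter.update counts each stripped line as one occurrence.
        entity.update(line.rstrip('\n') for line in f)

with open("../output/entity_frequency.csv", "w", encoding="utf-8", newline="") as out:
    writer = csv.writer(out)  # handles quoting of commas inside entities
    writer.writerow(["entity", "frequency"])
    writer.writerows(entity.items())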
[ "value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self,", "return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token):", "load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module)", "__name__ == \"__main__\": # Just checking # Please, pay attention, that this class", "this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL)", "res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass", "text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\":", "in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key]", "one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC): def setup_rules(self):", "SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self):", "'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees'])", "= one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC): def", "# Please, pay attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle", "# Just checking # Please, pay attention, that this class imported t =", "def processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'):", "imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded')", "current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees']) resAttribs = hj.get_res_attributes()", "len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return", "import spacy_udpipe from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def", "spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0: for one_subtag in", "print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees']) resAttribs", "normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just checking # Please, pay 
attention,", "value = one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass", ".settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|')) >", "current_token): pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just checking #", "import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0:", "> 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return res", "{} if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1:", "setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if", "abstractmethod import os import spacy_udpipe from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL", "t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj", "from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res", "class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def", "def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just", "1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return res class", "from abc import ABC, abstractmethod import os import spacy_udpipe from .utils import load_pickled_file", "spacy_udpipe from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation):", "len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key =", "= load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj =", "res[key] = value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass", "Please, pay attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded')", "processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if", "if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value", "one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self):", "import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {}", "= {} if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) >", "def setup_rules(self): pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass", "Just checking 
# Please, pay attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData',", "attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module =", "0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value", "from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|'))", "if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key", "if __name__ == \"__main__\": # Just checking # Please, pay attention, that this", "key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC):", "import ABC, abstractmethod import os import spacy_udpipe from .utils import load_pickled_file from .settings", "checking # Please, pay attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl'))", "calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just checking", "pass if __name__ == \"__main__\": # Just checking # Please, pay attention, that", "import spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0: for one_subtag", "for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value =", "loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees']) resAttribs =", "class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model", "pass def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__", "import os import spacy_udpipe from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import", "os import spacy_udpipe from .utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy", "def normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just checking # Please, pay", ".utils import load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res =", "pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\": #", "one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1]", "abc import ABC, abstractmethod import os import spacy_udpipe from .utils import load_pickled_file from", "PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {} if len(tag_representation.split('|')) > 0: for", "= spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees']) resAttribs = hj.get_res_attributes() print('Thats", "spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL) print('Model loaded') hj = 
SyntaxVectorizerRU(current_nlp_module) hj.convert_to_attributes(t['Trees']) resAttribs = hj.get_res_attributes() print('Thats all')", "= value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def text_structures_initializer(self): pass def", "load_pickled_file from .settings import PATH_TO_RUS_UDPIPE_MODEL import spacy def processTag(tag_representation): res = {} if", "pay attention, that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module", "\"__main__\": # Just checking # Please, pay attention, that this class imported t", "def text_structures_initializer(self): pass def calculate_morpho_tags(self, current_token): pass def normalize_morpho_tags(self): pass if __name__ ==", "== \"__main__\": # Just checking # Please, pay attention, that this class imported", "that this class imported t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl')) print('Pickle loaded') current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus',", "res = {} if len(tag_representation.split('|')) > 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('='))", "> 0: for one_subtag in tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0]", "pass def normalize_morpho_tags(self): pass if __name__ == \"__main__\": # Just checking # Please,", "= one_subtag.split('=')[1] res[key] = value return res class SyntaxVectorizer(ABC): def setup_rules(self): pass def", "tag_representation.split('|'): if len(one_subtag.split('=')) > 1: key = one_subtag.split('=')[0] value = one_subtag.split('=')[1] res[key] =", "ABC, abstractmethod import os import spacy_udpipe from .utils import load_pickled_file from .settings import" ]
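For reference, processTag expects the pipe-separated Key=Value feature strings that UDPipe emits in the CoNLL-U FEATS column:

feats = processTag("Case=Nom|Gender=Masc|Number=Sing")
# feats == {'Case': 'Nom', 'Gender': 'Masc', 'Number': 'Sing'}
# A featureless tag such as "_" contains no '=' pair and yields {}.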
[ "= regularization def train(self, input, target): layers = self.layers loss = self.loss regularization", "= self.layers for layer in layers: if isinstance(layer, self.diff): layer.mode = \"test\" input", "for _, param in layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target) dout", "in layers: if isinstance(layer, self.diff): layer.mode = \"test\" input = layer.forward(input) return np.argmax(input,", "return np.argmax(input, axis=1), l def eval(self, input): layers = self.layers for layer in", "layer.grads.items(): if regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr *", "in layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target) dout = loss.backward() for", "BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d,", "self.layers loss = self.loss regularization = self.regularization l = 0 for layer in", "* grad return np.argmax(input, axis=1), l def eval(self, input): layers = self.layers for", "Network(object): def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input,", "class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers", "loss self.lr = lr self.regularization = regularization def train(self, input, target): layers =", "self.layers for layer in layers: if isinstance(layer, self.diff): layer.mode = \"test\" input =", "np.argmax(input, axis=1), l def eval(self, input): layers = self.layers for layer in layers:", "not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input, axis=1),", "Dropout) def train(self, input, target): raise NotImplementedError def eval(self, input): raise NotImplementedError class", "self.regularization = regularization def train(self, input, target): layers = self.layers loss = self.loss", "= self.layers loss = self.loss regularization = self.regularization l = 0 for layer", "= layers self.loss = loss self.lr = lr self.regularization = regularization def train(self,", "loss.backward() for layer in reversed(layers): dout = layer.backward(dout) for param, grad in layer.grads.items():", "Dropout class Network(object): def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def", "NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr,", "target): raise NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self, layers,", "regularization is not None: for _, param in layer.params.items(): l += regularization.forward(param) l", "+= loss.forward(input, target) dout = loss.backward() for layer in reversed(layers): dout = layer.backward(dout)", "layer.backward(dout) for param, grad in layer.grads.items(): if regularization is not None: grad +=", "regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss = loss self.lr = lr self.regularization", "def train(self, input, target): layers = self.layers loss = self.loss regularization = self.regularization", "grad return np.argmax(input, axis=1), l def eval(self, input): layers = self.layers for layer", "BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__() 
self.diff = (BatchNorm, BatchNorm2d, Dropout)", "layer.mode = \"train\" input = layer.forward(input) if regularization is not None: for _,", "input): layers = self.layers for layer in layers: if isinstance(layer, self.diff): layer.mode =", "if isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input) if regularization is not", "= loss self.lr = lr self.regularization = regularization def train(self, input, target): layers", "self.lr = lr self.regularization = regularization def train(self, input, target): layers = self.layers", "def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target):", "def eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None):", "is not None: for _, param in layer.params.items(): l += regularization.forward(param) l +=", "in layers: if isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input) if regularization", "self.diff): layer.mode = \"train\" input = layer.forward(input) if regularization is not None: for", "input): raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__()", "layers: if isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input) if regularization is", "layer in layers: if isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input) if", "self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError def", "= self.regularization l = 0 for layer in layers: if isinstance(layer, self.diff): layer.mode", "param in layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target) dout = loss.backward()", "__init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss = loss", "self.layers = layers self.loss = loss self.lr = lr self.regularization = regularization def", "regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return", "= self.loss regularization = self.regularization l = 0 for layer in layers: if", "l def eval(self, input): layers = self.layers for layer in layers: if isinstance(layer,", "axis=1), l def eval(self, input): layers = self.layers for layer in layers: if", "self.regularization l = 0 for layer in layers: if isinstance(layer, self.diff): layer.mode =", "for layer in layers: if isinstance(layer, self.diff): layer.mode = \"test\" input = layer.forward(input)", "__init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise", "loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss = loss self.lr =", "super(Sequential, self).__init__() self.layers = layers self.loss = loss self.lr = lr self.regularization =", "layer in reversed(layers): dout = layer.backward(dout) for param, grad in layer.grads.items(): if regularization", "layers = self.layers loss = self.loss regularization = self.regularization l = 0 for", "not None: for _, param in layer.params.items(): l += regularization.forward(param) l += loss.forward(input,", "np from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__()", "layers = self.layers for layer in layers: if 
isinstance(layer, self.diff): layer.mode = \"test\"", "eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential,", "Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss", "reversed(layers): dout = layer.backward(dout) for param, grad in layer.grads.items(): if regularization is not", "(BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError def eval(self, input): raise", "in reversed(layers): dout = layer.backward(dout) for param, grad in layer.grads.items(): if regularization is", "NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers =", "in layer.grads.items(): if regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr", "krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__() self.diff =", "l += regularization.forward(param) l += loss.forward(input, target) dout = loss.backward() for layer in", "target) dout = loss.backward() for layer in reversed(layers): dout = layer.backward(dout) for param,", "lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss = loss self.lr = lr", "= (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError def eval(self, input):", "param, grad in layer.grads.items(): if regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param]", "input, target): raise NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self,", "lr self.regularization = regularization def train(self, input, target): layers = self.layers loss =", "train(self, input, target): raise NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network): def", "self.lr * grad return np.argmax(input, axis=1), l def eval(self, input): layers = self.layers", "raise NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss,", "if regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad", "= lr self.regularization = regularization def train(self, input, target): layers = self.layers loss", "self.loss = loss self.lr = lr self.regularization = regularization def train(self, input, target):", "as np from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network,", "grad in layer.grads.items(): if regularization is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -=", "None: for _, param in layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target)", "class Network(object): def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self,", "self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError def eval(self,", "+= regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input, axis=1), l def eval(self,", "regularization = self.regularization l = 0 for layer in layers: if isinstance(layer, 
self.diff):", "l = 0 for layer in layers: if isinstance(layer, self.diff): layer.mode = \"train\"", "layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss = loss self.lr", "layer.params[param] -= self.lr * grad return np.argmax(input, axis=1), l def eval(self, input): layers", "= loss.backward() for layer in reversed(layers): dout = layer.backward(dout) for param, grad in", "None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input, axis=1), l", "def train(self, input, target): raise NotImplementedError def eval(self, input): raise NotImplementedError class Sequential(Network):", "import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__() self.diff = (BatchNorm,", "_, param in layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target) dout =", "for param, grad in layer.grads.items(): if regularization is not None: grad += regularization.backward(layer.params[param])", "from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self): super(Network, self).__init__() self.diff", "layers self.loss = loss self.lr = lr self.regularization = regularization def train(self, input,", "target): layers = self.layers loss = self.loss regularization = self.regularization l = 0", "\"train\" input = layer.forward(input) if regularization is not None: for _, param in", "loss.forward(input, target) dout = loss.backward() for layer in reversed(layers): dout = layer.backward(dout) for", "dout = layer.backward(dout) for param, grad in layer.grads.items(): if regularization is not None:", "= layer.forward(input) if regularization is not None: for _, param in layer.params.items(): l", "def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers = layers self.loss =", "layers: if isinstance(layer, self.diff): layer.mode = \"test\" input = layer.forward(input) return np.argmax(input, axis=1)", "0 for layer in layers: if isinstance(layer, self.diff): layer.mode = \"train\" input =", "BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError def eval(self, input): raise NotImplementedError", "self).__init__() self.layers = layers self.loss = loss self.lr = lr self.regularization = regularization", "isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input) if regularization is not None:", "if regularization is not None: for _, param in layer.params.items(): l += regularization.forward(param)", "regularization def train(self, input, target): layers = self.layers loss = self.loss regularization =", "regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input, axis=1), l def eval(self, input):", "super(Network, self).__init__() self.diff = (BatchNorm, BatchNorm2d, Dropout) def train(self, input, target): raise NotImplementedError", "layer.forward(input) if regularization is not None: for _, param in layer.params.items(): l +=", "= layer.backward(dout) for param, grad in layer.grads.items(): if regularization is not None: grad", "self.loss regularization = self.regularization l = 0 for layer in layers: if isinstance(layer,", "eval(self, input): layers = self.layers for layer in layers: if isinstance(layer, self.diff): layer.mode", "loss = self.loss regularization = self.regularization l = 0 for layer in layers:", "+= regularization.forward(param) l += 
loss.forward(input, target) dout = loss.backward() for layer in reversed(layers):", "grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input, axis=1), l def", "for layer in reversed(layers): dout = layer.backward(dout) for param, grad in layer.grads.items(): if", "train(self, input, target): layers = self.layers loss = self.loss regularization = self.regularization l", "raise NotImplementedError class Sequential(Network): def __init__(self, layers, loss, lr, regularization=None): super(Sequential, self).__init__() self.layers", "layer.params.items(): l += regularization.forward(param) l += loss.forward(input, target) dout = loss.backward() for layer", "input, target): layers = self.layers loss = self.loss regularization = self.regularization l =", "l += loss.forward(input, target) dout = loss.backward() for layer in reversed(layers): dout =", "-= self.lr * grad return np.argmax(input, axis=1), l def eval(self, input): layers =", "input = layer.forward(input) if regularization is not None: for _, param in layer.params.items():", "numpy as np from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def __init__(self):", "= 0 for layer in layers: if isinstance(layer, self.diff): layer.mode = \"train\" input", "for layer in layers: if isinstance(layer, self.diff): layer.mode = \"train\" input = layer.forward(input)", "def eval(self, input): layers = self.layers for layer in layers: if isinstance(layer, self.diff):", "is not None: grad += regularization.backward(layer.params[param]) layer.params[param] -= self.lr * grad return np.argmax(input,", "layer in layers: if isinstance(layer, self.diff): layer.mode = \"test\" input = layer.forward(input) return", "import numpy as np from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout class Network(object): def", "= \"train\" input = layer.forward(input) if regularization is not None: for _, param", "regularization.forward(param) l += loss.forward(input, target) dout = loss.backward() for layer in reversed(layers): dout", "dout = loss.backward() for layer in reversed(layers): dout = layer.backward(dout) for param, grad" ]
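# A minimal usage sketch for Sequential (illustrative only: the Dense,
# SoftmaxLoss and L2 names below are assumed, not defined in this file):
#
#     from krikos.nn.layer import Dense
#     from krikos.nn.loss import SoftmaxLoss
#     from krikos.nn.regularization import L2
#
#     net = Sequential(layers=[Dense(784, 10)], loss=SoftmaxLoss(),
#                      lr=1e-2, regularization=L2(1e-4))
#     for inputs, targets in batches:
#         preds, l = net.train(inputs, targets)   # one SGD step per call
#     test_preds = net.eval(test_inputs)          # switches Dropout/BatchNorm to "test"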
[ "create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes", "MongoClient def main(): hostname = input(\"MongoDB Hostname (Default: localhost): \") if not hostname:", "\") if not hostname: hostname = \"localhost\" port = input(\"MongoDB Port (Default: 27017):", "bookmarks and recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif", "expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in)", "search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in", "unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \") if create_ttl_indexes == 'y' or", "1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)],", "illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks", "\") if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache", "if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache expires", "expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache expire in (sec): \"))", "db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)])", "expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires", "db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\",", "input(\"MongoDB Port (Default: 27017): \") if not port: port = \"27017\" username =", "Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if", "\") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)],", "1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)],", "search_user_cache_expires_in = 
int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in", "download_cache_expires_in = int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in =", "client = MongoClient(url) db = client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop", "cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and", "if not hostname: hostname = \"localhost\" port = input(\"MongoDB Port (Default: 27017): \")", "create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)],", "cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache", "create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache expires in", "unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True)", "illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\":", "db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\",", "option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)])", "1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)])", "1)]) elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\") if comfirm == 'y'", "= int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust", "db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\",", "\")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\",", "in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in", "detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking", "db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes", "illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in", "1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)])", "main(): hostname = input(\"MongoDB Hostname (Default: localhost): \") if not hostname: hostname =", "expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)])", "expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires", "1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)],", "= client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop", "= int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in =", "input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name:", "Hostname (Default: localhost): \") if not hostname: hostname = \"localhost\" port = input(\"MongoDB", "1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) 
db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)],", "pymongo import MongoClient def main(): hostname = input(\"MongoDB Hostname (Default: localhost): \") if", "password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name: \") url =", "unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True)", "option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)])", "illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in", "db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache expire in", "int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User", "Indexes? [y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in =", "== 'Y': download_cache_expires_in = int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in)", "= int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in =", "cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache", "expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in)", "= f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option = input(\"1: Create Indexes\\n\"", "\") if not port: port = \"27017\" username = input(\"MongoDB Username: \") password", "== 'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\") if __name__ ==", "hostname: hostname = \"localhost\" port = input(\"MongoDB Port (Default: 27017): \") if not", "db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\",", "url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option = input(\"1: Create", "'y' or create_ttl_indexes == 'Y': download_cache_expires_in = 
int(input(\"Download cache expires in (sec): \"))", "\"27017\" username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name =", "input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name]", "MongoClient(url) db = client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\"", "int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User", "db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \")", "1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\":", "\"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)])", "ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust", "Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\",", "comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\") if __name__", "db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\",", "db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\",", "\"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \")", "1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\")", "db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\",", "\")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec): \"))", "= MongoClient(url) db = client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop TTL", "(sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec):", "\"localhost\" port = input(\"MongoDB Port (Default: 27017): \") if not port: port =", "and recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option", "1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\",", "expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in)", "def main(): hostname = input(\"MongoDB Hostname (Default: localhost): \") if not hostname: hostname", "\") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option = input(\"1:", "27017): \") if not port: port = \"27017\" username = input(\"MongoDB Username: \")", "== \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\",", "option == \"4\": comfirm = input(\"Sure? [y/N]\") if comfirm == 'y' or comfirm", "comfirm = input(\"Sure? [y/N]\") if comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name)", "\")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache expire", "db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\",", "Port (Default: 27017): \") if not port: port = \"27017\" username = input(\"MongoDB", "db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL", "1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? 
[y/N] \") if create_ttl_indexes == 'y'", "1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes?", "int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search", "int(input(\"User bookmarks and recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in)", "1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \") if", "recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option ==", "cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache", "(sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec):", "Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\"", "1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)],", "in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in", "cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\",", "db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm", "db = client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3:", "= input(\"Sure? [y/N]\") if comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name) else:", "port = input(\"MongoDB Port (Default: 27017): \") if not port: port = \"27017\"", "option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\",", "\"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True)", "input(\"Create TTL Indexes? 
[y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y':", "= \"27017\" username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name", "= int(input(\"Illust ranking cache expires in (sec): \")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in =", "in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in", "other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\",", "1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)],", "1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option", "getpass from pymongo import MongoClient def main(): hostname = input(\"MongoDB Hostname (Default: localhost):", "db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\",", "f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option = input(\"1: Create Indexes\\n\" \"2:", "unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N]", "Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True)", "if not port: port = \"27017\" username = input(\"MongoDB Username: \") password =", "int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail", "db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif", "input(\"MongoDB Hostname (Default: localhost): \") if not hostname: hostname = \"localhost\" port =", "input(\"Sure? 
[y/N]\") if comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid", "[y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download", "expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)])", "== \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\",", "\"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option == \"1\":", "Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option", "in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in", "or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\",", "= input(\"MongoDB Port (Default: 27017): \") if not port: port = \"27017\" username", "(sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in (sec):", "1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\":", "[y/N]\") if comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\")", "int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust", "db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\") if comfirm ==", "= input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db =", "(sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache", "from pymongo import MongoClient def main(): hostname = input(\"MongoDB Hostname (Default: localhost): \")", "\") password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name: \") url", "= input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database", "Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option ==", "1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)],", "'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\") if __name__ == '__main__':", "= getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\"", "\"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\",", "in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in", "elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)]) db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\",", "\"4\": comfirm = input(\"Sure? [y/N]\") if comfirm == 'y' or comfirm == 'Y':", "if comfirm == 'y' or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\") if", "database_name = input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db", "= input(\"Create TTL Indexes? 
[y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes ==", "db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option ==", "'Y': download_cache_expires_in = int(input(\"Download cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in", "option = input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\"", "unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create", "port = \"27017\" username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \")", "expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in)", "int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search", "= input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4:", "expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires", "(sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\", 1)])", "1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)])", "= input(\"MongoDB Hostname (Default: localhost): \") if not hostname: hostname = \"localhost\" port", "db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm = input(\"Sure?", "if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True)", "= \"localhost\" port = input(\"MongoDB Port (Default: 27017): \") if not port: port", "expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in)", "cache expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache", "in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=other_cache_expires_in) elif option == \"2\": 
db['download_cache'].drop_index([(\"update_time\", 1)]) db['illust_detail_cache'].drop_index([(\"update_time\",", "db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif", "\")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user cache expires in (sec): \"))", "not port: port = \"27017\" username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB", "1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts cache expire in (sec):", "1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm =", "== 'y' or create_ttl_indexes == 'Y': download_cache_expires_in = int(input(\"Download cache expires in (sec):", "= int(input(\"Illust detail cache expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in =", "hostname = input(\"MongoDB Hostname (Default: localhost): \") if not hostname: hostname = \"localhost\"", "unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \") if create_ttl_indexes", "input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop", "import MongoClient def main(): hostname = input(\"MongoDB Hostname (Default: localhost): \") if not", "Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option:", "elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\") if comfirm == 'y' or", "db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\",", "Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name: \")", "(sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec):", "or comfirm == 'Y': client.drop_database(database_name) else: print(\"Invalid Option.\") if __name__ == '__main__': main()", "db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\",", "\"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)])", "Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True)", "= int(input(\"Search user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in =", "= int(input(\"User bookmarks and recommended illusts cache expire in (sec): \")) db['other_cache'].create_index([(\"update_time\", 1)],", "db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes = input(\"Create TTL Indexes? [y/N] \") if create_ttl_indexes ==", "import getpass from pymongo import MongoClient def main(): hostname = input(\"MongoDB Hostname (Default:", "elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\",", "getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client", "1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)])", "TTL Indexes? 
[y/N] \") if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y': download_cache_expires_in", "TTL Indexes\\n\" \"3: Drop Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option", "1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option", "\")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec): \"))", "not hostname: hostname = \"localhost\" port = input(\"MongoDB Port (Default: 27017): \") if", "username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password: \") database_name = input(\"MongoDB", "expires in (sec): \")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires", "illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_illust_cache_expires_in) search_user_cache_expires_in = int(input(\"Search user", "user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in", "\")) db['download_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=download_cache_expires_in) illust_detail_cache_expires_in = int(input(\"Illust detail cache expires in (sec): \"))", "1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)],", "user cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts", "cache expires in (sec): \")) db['search_user_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=search_user_cache_expires_in) user_illusts_cache_expires_in = int(input(\"User illusts cache", "1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)]) db['illust_ranking_cache'].drop_index([(\"mode\", 1)]) db['search_illust_cache'].drop_index([(\"word\", 1)]) db['search_user_cache'].drop_index([(\"word\", 1)]) db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)])", "db['user_illusts_cache'].drop_index([(\"user_id\", 1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\") if", "Drop Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)],", "db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\",", "localhost): \") if not hostname: hostname = \"localhost\" port = input(\"MongoDB Port (Default:", "Password: \") database_name = input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client =", "\"4: Drop Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\",", "in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended illusts", "Common Indexes\\n\" \"4: Drop Database\\n\" \"Option: \") if option == \"1\": db['download_cache'].create_index([(\"illust_id\", 1)],", "(sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires in (sec):", "port: port = \"27017\" username = input(\"MongoDB Username: \") password = getpass.getpass(\"MongoDB Password:", "client[database_name] option = input(\"1: Create Indexes\\n\" \"2: Drop TTL Indexes\\n\" \"3: Drop Common", "db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec): \")) db['search_illust_cache'].create_index([(\"update_time\",", "Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url) db = client[database_name] option =", "1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True) db['other_cache'].create_index([(\"type\", 1)], unique=True) create_ttl_indexes =", "1)]) db['other_cache'].drop_index([(\"type\", 1)]) elif option == \"4\": comfirm = input(\"Sure? 
[y/N]\") if comfirm", "db['illust_ranking_cache'].drop_index([(\"update_time\", 1)]) db['search_illust_cache'].drop_index([(\"update_time\", 1)]) db['search_user_cache'].drop_index([(\"update_time\", 1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option ==", "\") database_name = input(\"MongoDB Database Name: \") url = f\"mongodb://{username}:{password}@{hostname}:{port}\" client = MongoClient(url)", "(Default: 27017): \") if not port: port = \"27017\" username = input(\"MongoDB Username:", "unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\", 1)], unique=True) db['user_illusts_cache'].create_index([(\"user_id\", 1)], unique=True)", "hostname = \"localhost\" port = input(\"MongoDB Port (Default: 27017): \") if not port:", "== \"1\": db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)],", "expires in (sec): \")) db['user_illusts_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=user_illusts_cache_expires_in) other_cache_expires_in = int(input(\"User bookmarks and recommended", "(Default: localhost): \") if not hostname: hostname = \"localhost\" port = input(\"MongoDB Port", "db['download_cache'].create_index([(\"illust_id\", 1)], unique=True) db['illust_detail_cache'].create_index([(\"illust.id\", 1)], unique=True) db['illust_ranking_cache'].create_index([(\"mode\", 1)], unique=True) db['search_illust_cache'].create_index([(\"word\", 1)], unique=True) db['search_user_cache'].create_index([(\"word\",", "expires in (sec): \")) db['illust_detail_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_detail_cache_expires_in) illust_ranking_cache_expires_in = int(input(\"Illust ranking cache expires", "\")) db['illust_ranking_cache'].create_index([(\"update_time\", 1)], expireAfterSeconds=illust_ranking_cache_expires_in) search_illust_cache_expires_in = int(input(\"Search illust cache expires in (sec): \"))", "1)]) db['user_illusts_cache'].drop_index([(\"update_time\", 1)]) db['other_cache'].drop_index([(\"update_time\", 1)]) elif option == \"3\": db['download_cache'].drop_index([(\"illust_id\", 1)]) db['illust_detail_cache'].drop_index([(\"illust_id\", 1)])", "== \"4\": comfirm = input(\"Sure? [y/N]\") if comfirm == 'y' or comfirm ==" ]
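# Note: a TTL index tells mongod to delete documents once update_time is
# older than expireAfterSeconds; the background sweep runs roughly once a
# minute, so expiry is approximate. A quick way to verify the indexes
# created above (a sketch, reusing the same db handle):
#
#     for name in db.list_collection_names():
#         print(name, db[name].index_information())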
[ "import setup setup( name='monta', version='1.2', description='Disk mounting shortcut for use with dmenu.', author='<NAME>',", "from setuptools import setup setup( name='monta', version='1.2', description='Disk mounting shortcut for use with", "mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta',", "shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt'", "python3 from setuptools import setup setup( name='monta', version='1.2', description='Disk mounting shortcut for use", "name='monta', version='1.2', description='Disk mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={", "description='Disk mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [", "setup setup( name='monta', version='1.2', description='Disk mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta',", "package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[ 'monta/scripts/monta', 'monta/scripts/desmonta', 'monta/montautils' ] )", "<filename>setup.py<gh_stars>0 #!/usr/bin/env python3 from setuptools import setup setup( name='monta', version='1.2', description='Disk mounting shortcut", "#!/usr/bin/env python3 from setuptools import setup setup( name='monta', version='1.2', description='Disk mounting shortcut for", "dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[ 'monta/scripts/monta',", "url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[ 'monta/scripts/monta', 'monta/scripts/desmonta', 'monta/montautils'", "include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[ 'monta/scripts/monta', 'monta/scripts/desmonta', 'monta/montautils' ]", "use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] },", "for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ]", "setuptools import setup setup( name='monta', version='1.2', description='Disk mounting shortcut for use with dmenu.',", "setup( name='monta', version='1.2', description='Disk mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True,", "author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[ 'monta/scripts/monta', 'monta/scripts/desmonta',", "with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '': [ 'monta', 'license.txt' ] }, scripts=[", "version='1.2', description='Disk mounting shortcut for use with dmenu.', author='<NAME>', url='https://github.com/renatoliveira/monta', include_package_data=True, package_data={ '':" ]
[ "46, 59, 72, 100], dtype=float) for i, c in enumerate(celsius_q): print(\"{} degrees Celsius", "Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) #", "59, 72, 100], dtype=float) for i, c in enumerate(celsius_q): print(\"{} degrees Celsius =", "model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model", "layer weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4)", "fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float) for i, c", "= model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot stats import matplotlib.pyplot", "= tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q,", "is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0 variables: {}\".format(fc0.get_weights()))", "Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the", "as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q =", "print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 =", "weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2", "predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These", "fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print(", "fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot stats import matplotlib.pyplot as plt", "import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data", "optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\")", "training model\") # plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\")", "100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the", "fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0,", "model\") # plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"])", "print( \"Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) )", "model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history", "<filename>udacity/l02c01_celsius_to_fahrenheit.py<gh_stars>1-10 import tensorflow as tf import numpy 
as np import logging logger =", "tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model", "= tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1,", "15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100],", "fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500,", "l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These are the l2", "model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1))", "# create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model =", "fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts that 100", "print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These", "logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38],", "plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0])) # layer weights", "# training data celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)", "dtype=float) for i, c in enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c,", "plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0])) #", "as tf import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) #", "# compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a, epochs=500,", "model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training", "input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\",", "the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These are the", "print(\"Finished training model\") # plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss", "matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict", "np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float) for i, c in enumerate(celsius_q):", "tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q,", "fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = 
tf.keras.Sequential([fc0]) # model", "degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1,", "= tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) #", "Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights())", "verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius", "input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a,", "epochs=500, verbose=False) print(\"Finished training model\") # plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch", "{} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model =", "degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0])", "are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These are", "fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0]))", "= tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the", "model to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers fc0", "for i, c in enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i]))", "layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 =", "verbose=False) print(\"Finished training model\") # plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\")", "tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\")", "in enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model", "history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot stats import", "print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1", "tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train", "input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\",", "8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72,", "32, 46, 59, 72, 100], dtype=float) for i, c in 
enumerate(celsius_q): print(\"{} degrees", "= tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10, 0, 8, 15,", "100], dtype=float) for i, c in enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees", "i, c in enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) #", "np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32,", "{}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These are the l2 variables: {}\".format(fc2.get_weights()))", "celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40,", "train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot", "= tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history =", "optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts", "# plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) #", "Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1])", "epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees", "dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float) for i,", "tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished", "logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10, 0,", "{} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These", "# train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") #", "model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot stats import matplotlib.pyplot as", "= np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float) for i, c in", "use model to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers", "14, 32, 46, 59, 72, 100], dtype=float) for i, c in enumerate(celsius_q): print(\"{}", "variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights())) print(\"These are the l2 variables:", "plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0]))", "tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2])", "print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 =", "fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) 
model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1))", "plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) #", "# use model to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more", "model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])", "model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished", "predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4,", "enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0", "print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0])", "plot stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use", "Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0 variables:", ") ) print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables:", "as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values", "\"Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) )", "import tensorflow as tf import numpy as np import logging logger = tf.get_logger()", "degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0", "model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format(", "values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1])", "# model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model", "# layer weights print(fc0.get_weights()) # more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 =", "tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a,", "# more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1)", "np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40,", "numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q", "fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) #", "Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to predict values print(model.predict([100.0])) # layer", "more layers fc0 = tf.keras.layers.Dense(units=4, input_shape=[1]) fc1 = 
tf.keras.layers.Dense(units=4) fc2 = tf.keras.layers.Dense(units=1) model2", "tf import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training", "22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)", "logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10, 0, 8,", "model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training", "38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float) for", "= tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])]) # compile", "create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model = tf.keras.Sequential([fc0]) # model = tf.keras.Sequential([tf.keras.layers.Dense(units=1,", ") print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1 variables: {}\".format(fc1.get_weights()))", "72, 100], dtype=float) for i, c in enumerate(celsius_q): print(\"{} degrees Celsius = {}", "-10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46,", "print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius is:", "model.predict([100.0]) ) ) print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are the l1", "= tf.keras.layers.Dense(units=1) model2 = tf.keras.Sequential([fc0, fc1, fc2]) model2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)", "import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10,", "tf.get_logger() logger.setLevel(logging.ERROR) # training data celsius_q = np.array([-40, -10, 0, 8, 15, 22,", "tensorflow as tf import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR)", "stats import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model", "data celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a =", "training data celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a", "model2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training the model\") print(model.predict([100.0])) print( \"Model predicts that", "compile model model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1)) # train model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)", "= np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14,", "model history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False) print(\"Finished training model\") # plot stats", "degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are the l0 variables: {}\".format(fc0.get_weights())) print(\"These are", "training the model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius is: {}", "the model\") print(model.predict([100.0])) print( \"Model predicts that 100 degrees Celsius is: {} degrees", "0, 8, 15, 22, 38], dtype=float) fahrenheit_a = np.array([-40, 14, 32, 46, 59,", "c in 
enumerate(celsius_q): print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create", "import matplotlib.pyplot as plt plt.xlabel(\"Epoch Number\") plt.ylabel(\"Loss Magnitude\") plt.plot(history.history[\"loss\"]) # use model to", "to predict values print(model.predict([100.0])) # layer weights print(fc0.get_weights()) # more layers fc0 =", "that 100 degrees Celsius is: {} degrees Fahrenheit\".format( model.predict([100.0]) ) ) print(\"These are", "= {} degrees Fahrenheit\".format(c, fahrenheit_a[i])) # create model fc0 = tf.keras.layers.Dense(units=1, input_shape=[1]) model" ]
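Because the single Dense unit above is just the affine map f(c) = w*c + b, training should drive its weights toward the exact conversion F = 1.8*C + 32. The sketch below is not part of the corpus row; it re-trains the one-unit model and compares the learned kernel and bias against those constants.

# Hypothetical check, assuming the same data and one-unit model as the
# listing above; the variable names are reused for readability only.
import numpy as np
import tensorflow as tf

celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)

fc0 = tf.keras.layers.Dense(units=1, input_shape=[1])
model = tf.keras.Sequential([fc0])
model.compile(loss="mean_squared_error", optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)

# get_weights() returns [kernel, bias]; for a 1-in/1-out layer both hold
# a single value each.
kernel, bias = fc0.get_weights()
print("learned slope:     %.4f (exact: 1.8)" % kernel[0][0])
print("learned intercept: %.4f (exact: 32)" % bias[0])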
[ ") try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption) > 100:", "else: if await self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags:", "x in comments] # Initialize the asynchronous http session headers = { \"DNT\":", "params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\":", "logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter = logging.Formatter(\"%(message)s\") ch.setFormatter(formatter)", "return res elif response_type == \"json\": res = await r.json() logger.debug(res) return res", "await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id): headers = { \"DNT\":", "to be printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"] except KeyError:", "response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\": 50,", "the feed elements will be # temporarely stored q = asyncio.Queue() if explore:", "width, url, tags): tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(", "res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( { \"id\": str(location_id), \"first\": 50, \"after\":", "= media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"]", "await db.commit() logger.info(\"Unliked!\") async def comment(self, media): \"\"\" Check if the media satisfy", "user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page", "else: mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode =", "\"\"\" CREATE TABLE IF NOT EXISTS pics ( id INT, height INT, width", "rows) elif self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name, header, rows) else: logger.warning( f\"\"\"Unsupported file", "INTEGER ) \"\"\" ) logger.debug(\"Checking table: comments\") await db.execute( \"\"\" CREATE TABLE IF", "settings...\") # Load settings with open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\",", "utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]:", "await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( 
f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by", "= json.dumps( {\"tag_name\": hashtag, \"first\": count, \"after\": end_cursor} ) for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]:", "mediatype == \"GraphImage\" or mediatype == \"GraphSidecar\": comment = self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ]", "} ) for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\"", "shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username", "self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️", "int(time.time()), comment) ) await db.commit() logger.info(\"Comment posted!\") async def follow(self, media): \"\"\" Check", "aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT * FROM likes WHERE id=?\",", "f\"https://www.instagram.com/{username}/\", params=\"__a:1\" ) return json.loads( regex.findall( r\"<script[^>]*>window._sharedData = (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL", "\"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page = True while has_next_page: res =", "hashtags: [List of hastags] Media with those hashtags will be added to the", "/ 100 <= random(): if mediatype == \"GraphImage\" or mediatype == \"GraphSidecar\": comment", "file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def close(self): logger.info(\"\\nClosing session...\")", "in hashtags: asyncio.ensure_future(self._hashtag_feed(q, hashtag)) if len(locations): # Add all the media from the", "rows = await db.execute( f\"SELECT * FROM '{table_name}'\" ) header = [i[0] for", "the explore page will be added to to the feed. users: [List of", "= json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]:", "time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many requests! Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" )", "id = user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24})", "FROM '{table_name}'\" ) header = [i[0] for i in rows.description] rows = await", "request: increase retry time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many requests! 
Retrying in", "or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or", "False async def pic_already_saved(self, id): logger.debug(\"Checking database.\") async with aiosqlite.connect(\"./piggy.db\") as db: row", "import aiohttp import aiosqlite import aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError from", "def _comment(self, id, comment, reply_to_id=None): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\":", "type: {response_type}\") elif r.status == 429: # Unsuccessfull request: increase retry time self.settings['connection'][\"wait_time\"]", ") for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def following(self, username=None): following", "= [] pass else: if await self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags =", "media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else: mediatype", "> random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id): headers =", ") if await row.fetchone() is None: return False else: return True async def", "media from the given users to the queue for user in users: asyncio.ensure_future(self._user_feed(q,", ") has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\":", "async with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await db.execute( \"\"\" CREATE TABLE", "res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q, location_id): count = 0 params =", "the media satisfy the prerequisites and eventually send a follow request. Args: media:", "if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few", "False except TimeoutError: return False async def pic_already_saved(self, id): logger.debug(\"Checking database.\") async with", "http session headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"]", "the prerequisites and eventually send a follow request. 
Args: media: The media of", "= json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\",", "async def backup(self): while 1: logger.info(\"Backing up database...\") for table_name in [\"users\", \"likes\",", "\"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] =", "liked!\") return if self.settings[\"like\"][\"rate\"] / 100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\")", "format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def close(self): logger.info(\"\\nClosing session...\") #", "response type: {response_type}\") elif r.status == 429: # Unsuccessfull request: increase retry time", "media satisfy the prerequisites and eventually send a follow request. Args: media: The", "def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] } headers = {", "c = await db.execute(\"SELECT * FROM users WHERE id=?\", (id,)) if c.rowcount: await", "r.read()) await f.close() return True else: return False except TimeoutError: return False async", "ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter = logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\"", "comments ( id INTEGER, ts INTEGER, comment TEXT ) \"\"\" ) logger.info(\"Updating followers", "None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as", "with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time())) )", "from random import random, randint import asyncio import aiohttp import aiosqlite import aiofiles", "res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates", "headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id = res[\"userId\"] elif res[\"message\"]", "= await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor =", "is None: return False else: return True async def save_to_database(self, id, type, height,", "async with aiosqlite.connect(\"./piggy.db\") as db: rows = await db.execute( f\"SELECT * FROM '{table_name}'\"", "few likes. 
Not liked!\") return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or", "pass else: if len(caption) > 100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\") async def like(self, media):", "required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't", "def comment(self, media): \"\"\" Check if the media satisfy the prerequisites and eventually", "send a follow request. Args: media: The media of the user to be", "header, rows) else: logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) )", "username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption", "\"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log in.\") cookies = utils.cookies_dict(self.session.cookie_jar)", "return following async def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a feed", "parameters. Multiple parameters can be passed at the same time. Args: explore: [Bool]", "media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] /", "logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session: try: async with session.get(url) as r:", "up database...\") for table_name in [\"users\", \"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async with aiosqlite.connect(\"./piggy.db\")", "res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor}", "f.readlines() self.video_comments_list = [x.strip() for x in comments] # Initialize the asynchronous http", "\"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"]", "the csrf token. It is needed to log in self.csrf_token = await self._getCsrfTokenFromForm()", "EXISTS users ( id TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL,", "comment. Args: media: The media to comment. Retruns: None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments", "will send a like. Args: media: The media to like. 
Retruns: None \"\"\"", "= aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\") # Get the", "self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\") else: return", "\"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] =", "count}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\",", "_hashtag_feed(self, q, hashtag): count = 0 params = { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\":", "\"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page = True while has_next_page: res =", "await r.json() logger.debug(res) return res else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status", "where the feed elements will be # temporarely stored q = asyncio.Queue() if", "users VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request sent!\")", "= await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor =", "else: mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes", "res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def following(self, username=None): following = [] if username", "open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load", "to log in self.csrf_token = await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): # Get login", "loop): self.loop = loop async def http_request( self, method, url, headers=None, params=None, data=None,", "db.execute( \"\"\" UPDATE users SET ts_following=?, following=? WHERE id=? \"\"\", (int(time.time()), True, id)", "users=[], hashtags=[], locations=[]): \"\"\" Generates a feed based on the passed parameters. 
Multiple", "{username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else:", "user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the media from", "regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0] async def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"],", "as db: row = await db.execute( \"SELECT * FROM comments WHERE id=?\", (media[\"id\"],)", "\"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while has_next_page: res", "\"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers )", ") return regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0] async def login(self): payload = {", "WHERE id=?\", (id,) ) if await row.fetchone() is None: return False else: return", "self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. Not liked!\") return if self.settings[\"like\"][\"rate\"] /", "\"\"\" CREATE TABLE IF NOT EXISTS users ( id TEXT, username TEXT, ts_follower", "* FROM comments WHERE id=?\", (media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already", "following=1 WHERE username=?\", (username,) ) await db.commit() async def followers(self, username=None): followers =", "db.execute( \"UPDATE users SET following=false WHERE id=?\", (id,) ) await db.commit() async def", "id, type, height, width, url, tags): tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as", "if r.status == 200: # Successfull request: decrease retry time if self.settings['connection'][\"wait_time\"] >", "_explore_feed(self, q): params = { \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page =", "q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a visual representation of a media.", "satisfy the prerequisites and eventually it will send a comment. Args: media: The", "of usernames] Their media will be pulled and added to the feed. hashtags:", "\"GraphImage\" or mediatype == \"GraphSidecar\": comment = self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ] else: comment", "= logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter =", "follower BOOL, following BOOL ) \"\"\" ) logger.debug(\"Checking table: likes\") await db.execute( \"\"\"", "many or too few likes. 
Not liked!\") return comments = media[\"edge_media_to_comment\"][\"count\"] if comments", ")[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media): id = media[\"id\"] url = media[\"display_url\"]", "(id, int(time.time()), comment) ) await db.commit() logger.info(\"Comment posted!\") async def follow(self, media): \"\"\"", "media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags = [] pass else: if", "= media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or", "comment = self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ]", "return json.loads( regex.findall( r\"<script[^>]*>window._sharedData = (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"]", "self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\")", "await db.commit() async def backup(self): while 1: logger.info(\"Backing up database...\") for table_name in", "of a media. Args: media: The media to be printed. Returns: None \"\"\"", "the same time. Args: explore: [Bool] If True the explore page will be", "self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"]", "= [i[0] for i in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] ==", "id, format): logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session: try: async with session.get(url)", "= await self.http_request( \"GET\", \"https://www.instagram.com/accounts/login/\" ) return regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0] async", "{r.reason}\") if r.status == 200: # Successfull request: decrease retry time if self.settings['connection'][\"wait_time\"]", ">= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. 
Not liked!\") return if self.settings[\"like\"][\"rate\"]", "end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} )", "def download_pic(self, url, id, format): logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session: try:", "random(): if mediatype == \"GraphImage\" or mediatype == \"GraphSidecar\": comment = self.pic_comments_list[ randint(0,", "by {username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass", "{id}\") async with aiohttp.ClientSession() as session: try: async with session.get(url) as r: if", "loop async def http_request( self, method, url, headers=None, params=None, data=None, response_type=\"text\" ): await", "is needed to log in self.csrf_token = await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): #", "self.csrf_token = cookies[\"csrftoken\"] # Initialize the database await self._init_database() async def _init_database(self): logger.info(\"Checking", "EXISTS pics ( id INT, height INT, width INT, url TEXT, tags TEXT", "async with session.get(url) as r: if r.status == 200: f = await aiofiles.open(", "{r.url}\") elif method == \"POST\": r = await self.session.post( url, headers=headers, data=data )", "def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") # Load settings with open(settings_path) as f: self.settings", "if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100", ") for username in await self.following(): await db.execute( \"UPDATE users SET following=1 WHERE", "= True while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" )", "Retruns: Yields a media from the generated feed. 
\"\"\" # Initialize asynchronous queue", "if response_type == \"text\": res = await r.text() logger.debug(res) return res elif response_type", "db.execute(\"UPDATE users SET follower=0, following=1\") for username in await self.followers(): await db.execute( \"UPDATE", "aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id, height, width, url,", "async def _unlike(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"],", "to the queue for location in locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding", "liked!\") async def _like(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\":", "# Initialize the database await self._init_database() async def _init_database(self): logger.info(\"Checking database...\") # Connect", ") logger.info(\"Updating followers and following lists.\") await db.execute(\"UPDATE users SET follower=0, following=1\") for", "randint import asyncio import aiohttp import aiosqlite import aiofiles import regex from aiohttp.client_exceptions", "id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50})", "media from the given locations to the queue for location in locations: asyncio.ensure_future(self._location_feed(q,", "await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def close(self): logger.info(\"\\nClosing session...\") # Close the http", "{ \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout(", "\"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db:", "res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload", "# Add all the media from the given users to the queue for", "for the table names async with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await", "len(caption) > 100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\") async def like(self, media): \"\"\" Check if", "id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50})", "r.status == 200: # Successfull request: decrease retry time if self.settings['connection'][\"wait_time\"] > 0:", "params[\"variables\"] = json.dumps( {\"tag_name\": hashtag, \"first\": count, \"after\": end_cursor} ) for media in", "media type. 
Not liked!\") return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or", "aiosqlite.connect(\"./piggy.db\") as db: c = await db.execute(\"SELECT * FROM users WHERE id=?\", (id,))", "media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags = [] pass else: if await self.download_pic(url, id, format):", "{caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\") else: return await self.save_to_database(id, type, height,", "self.settings[\"backup\"][table_name]: async with aiosqlite.connect(\"./piggy.db\") as db: rows = await db.execute( f\"SELECT * FROM", "headers=headers, params=params ) logger.debug(f\"[GET] {r.url}\") elif method == \"POST\": r = await self.session.post(", "comments WHERE id=?\", (media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already commented.\") return", "with session.get(url) as r: if r.status == 200: f = await aiofiles.open( f\"./images/{id}.{format}\",", "await utils.to_json(table_name, header, rows) else: logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep(", ") if await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype = media[\"__typename\"] except KeyError:", "= \"GraphVideo\" else: mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]):", "Connect to the local database and look for the table names async with", "def _location_feed(self, q, location_id): count = 0 params = { \"query_hash\": \"1b84447a4d8b6d6d0426fefb34514485\", \"variables\":", "users SET follower=0, following=1\") for username in await self.followers(): await db.execute( \"UPDATE users", "\"POST\": r = await self.session.post( url, headers=headers, data=data ) logger.debug(f\"[POST] {r.url}\") else: raise", "as db: row = await db.execute( \"SELECT * FROM likes WHERE id=?\", (media[\"id\"],)", "(media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already commented.\") return try: mediatype =", "\"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res)", "from the generated feed. \"\"\" # Initialize asynchronous queue where the feed elements", "headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO likes WHERE id=?\",", "printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"] except KeyError: is_video =", "too few likes. 
Not liked!\") return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"]", "aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\", (id, int(time.time()), comment) )", "+= 1 params[\"variables\"] = json.dumps( {\"tag_name\": hashtag, \"first\": count, \"after\": end_cursor} ) for", "await self.pic_already_saved(id): return height = media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"]", "f.close() return True else: return False except TimeoutError: return False async def pic_already_saved(self,", "in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def following(self, username=None): following = [] if", "( id INTEGER, ts INTEGER ) \"\"\" ) logger.debug(\"Checking table: comments\") await db.execute(", "feed. locations: [List of locations ids] Media with those locations will be added", "\"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO likes", "likes WHERE id=?\", (id,)) await db.commit() logger.info(\"Unliked!\") async def comment(self, media): \"\"\" Check", "params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page =", "50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def", "the user to be followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100 >", "headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout", "db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time())) ) await db.commit() logger.info(\"Liked!\") async def", "count, \"after\": end_cursor} ) for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self,", "random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id): headers = {", "res[\"authenticated\"]: logger.info(\"Logged in!\") self.id = res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res", "tags): tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO", "logger.info(f\"Tags: {tags}\") else: return await self.save_to_database(id, type, height, width, url, tags) async def", "db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id, height, width, url, tags) ) await db.commit()", "(id, int(time.time())) ) await db.commit() logger.info(\"Liked!\") async def _unlike(self, id): headers = {", "= res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\": 50, \"after\": end_cursor} )", "def _unfollow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\":", "asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media from the given 
users to", "\"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while has_next_page: res", "} await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await", "_unlike(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token", "TEXT, tags TEXT ) \"\"\" ) logger.debug(\"Checking table: users\") await db.execute( \"\"\" CREATE", "db: await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id, height, width, url, tags) )", "\"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _hashtag_feed(self, q,", "await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read()) await f.close() return True else:", "(id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users SET ts_following=?, following=? WHERE id=?", "pass else: if await self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption)", "Args: media: The media to comment. Retruns: None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\")", "self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id =", "and look for the table names async with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table:", "= res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\":", "table: likes\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS likes ( id", "code: {r.status} {r.reason}\") if r.status == 200: # Successfull request: decrease retry time", "= (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def", "media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\",", "not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or", "be added to to the feed. 
users: [List of usernames] Their media will", "logger.info(\"Liked!\") async def _unlike(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\":", "logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else:", "else: logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def", "r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0] async def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\":", "with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT * FROM likes WHERE", "posted!\") async def follow(self, media): \"\"\" Check if the media satisfy the prerequisites", "the generated feed. \"\"\" # Initialize asynchronous queue where the feed elements will", "<= random(): if mediatype == \"GraphImage\" or mediatype == \"GraphSidecar\": comment = self.pic_comments_list[", "# Keep on yielding media while more is loaded while 1: while not", "\"password\": self.settings[\"user\"][\"password\"] } headers = { \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } res =", "users ( id TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL, following", "async def _follow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"],", "return res else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status == 429: #", "\"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1", "[Bool] If True the explore page will be added to to the feed.", "__init__(self, loop): self.loop = loop async def http_request( self, method, url, headers=None, params=None,", "self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id): headers = { \"DNT\": \"1\",", "(id, None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request sent!\") async def", "ts_following=?, following=? WHERE id=? \"\"\", (int(time.time()), True, id) ) else: await db.execute( \"INSERT", "else: logger.info(\"Not followed!\") async def _follow(self, id): headers = { \"DNT\": \"1\", \"Host\":", "\"INSERT INTO users VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow", "elif method == \"POST\": r = await self.session.post( url, headers=headers, data=data ) logger.debug(f\"[POST]", "\"text\": res = await r.text() logger.debug(res) return res elif response_type == \"json\": res", "= json.dumps( {\"id\": id, \"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]:", "logger.debug(\"Checking table: likes\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS likes (", "retry time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many requests! 
Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\"", "INTEGER, follower BOOL, following BOOL ) \"\"\" ) logger.debug(\"Checking table: likes\") await db.execute(", "= regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\" or await self.pic_already_saved(id): return height =", "raise ValueError(f\"Invalid response type: {response_type}\") elif r.status == 429: # Unsuccessfull request: increase", "\"GET\", \"https://www.instagram.com/accounts/login/\" ) return regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0] async def login(self): payload", "mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]): logger.info(\"Wrong media type. Not liked!\") return likes = media[\"edge_liked_by\"][\"count\"] if", "a media. Args: media: The media to be printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\")", "username TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL, following BOOL ) \"\"\" )", "of locations ids] Media with those locations will be added to the feed.", "= \"GraphVideo\" else: mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"]", "/ 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self, id):", "username=None): followers = [] if username is None: id = self.id else: user", "await db.execute( \"\"\" UPDATE users SET ts_following=?, following=? WHERE id=? \"\"\", (int(time.time()), True,", "self.session.get( url, headers=headers, params=params ) logger.debug(f\"[GET] {r.url}\") elif method == \"POST\": r =", "res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media): id =", "row = await db.execute( \"SELECT * FROM likes WHERE id=?\", (media[\"id\"],) ) if", "= { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout =", "res = await r.text() logger.debug(res) return res elif response_type == \"json\": res =", "q, hashtag): count = 0 params = { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag,", "\"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with", "media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"]", "await db.execute( \"SELECT * FROM pics WHERE id=?\", (id,) ) if await row.fetchone()", "INT, width INT, url TEXT, tags TEXT ) \"\"\" ) logger.debug(\"Checking table: users\")", "\"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if is_video:", "await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a visual representation of a", "False, True) ) await db.commit() logger.info(\"Follow request sent!\") async def unfollow(self, id): return", ") async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\", (id,", 
") has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\":", "media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _hashtag_feed(self, q, hashtag): count = 0", "logger.info(\"Unliked!\") async def comment(self, media): \"\"\" Check if the media satisfy the prerequisites", "for x in comments] # Load comments list for videos with open(\"comments/video_comments.txt\") as", "self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT", "IF NOT EXISTS likes ( id INTEGER, ts INTEGER ) \"\"\" ) logger.debug(\"Checking", "params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page =", "int(time.time())) ) await db.commit() logger.info(\"Liked!\") async def _unlike(self, id): headers = { \"DNT\":", "elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers,", "elif r.status == 429: # Unsuccessfull request: increase retry time self.settings['connection'][\"wait_time\"] += 1", "= json.dumps( { \"id\": str(location_id), \"first\": 50, \"after\": str(end_cursor) } ) for media", "= await r.json() logger.debug(res) return res else: raise ValueError(f\"Invalid response type: {response_type}\") elif", "be pulled and added to the feed. hashtags: [List of hastags] Media with", "\"variables\": json.dumps({\"first\": 24}) } has_next_page = True while has_next_page: res = await self.http_request(", "the queue for location in locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding media", "Not liked!\") return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >=", "SET following=1 WHERE username=?\", (username,) ) await db.commit() async def followers(self, username=None): followers", "INTO users VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request", "if is_video: mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"]", "await self._comment(media[\"id\"], comment) else: logger.info(\"Not commented!\") async def _comment(self, id, comment, reply_to_id=None): headers", "is None: id = self.id else: user = await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"]", "= await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor =", "for location in locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding media while more", "likes. 
Not liked!\") return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments", "mode=\"wb\" ) await f.write(await r.read()) await f.close() return True else: return False except", ") for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q, location_id): count", "await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"]", "len(self.pic_comments_list)-1) ] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else:", "= user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) }", "at the same time. Args: explore: [Bool] If True the explore page will", "\"\"\", (int(time.time()), True, id) ) else: await db.execute( \"INSERT INTO users VALUES(?,?,?,?,?)\", (id,", "in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type", "# Check if the media has already been liked async with aiosqlite.connect(\"./piggy.db\") as", "f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO likes", "json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"])", "def __init__(self, loop): self.loop = loop async def http_request( self, method, url, headers=None,", "logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter", "WHERE username=?\", (username,) ) for username in await self.following(): await db.execute( \"UPDATE users", "30 seconds.\") await asyncio.sleep(30) return await self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type", "\"SELECT * FROM pics WHERE id=?\", (id,) ) if await row.fetchone() is None:", "followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else:", "requests! 
Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method, url, headers=headers, params=params,", "q = asyncio.Queue() if explore: # Add the \"explore\" feed to the queue", "str(location_id), \"first\": 50}) } has_next_page = True while has_next_page: res = await self.http_request(", "self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\", params=\"__a:1\" ) return json.loads( regex.findall( r\"<script[^>]*>window._sharedData = (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\",", "has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"has_next_page\"]", "def close(self): logger.info(\"\\nClosing session...\") # Close the http session await self.session.close() async def", "= self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await", "db.execute( \"SELECT * FROM pics WHERE id=?\", (id,) ) if await row.fetchone() is", "ClientConnectorError from piggy import utils # Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch =", "%(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) class Piggy: def __init__(self, loop): self.loop", "self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\") # Get the csrf token. It is", "elif self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name, header, rows) else: logger.warning( f\"\"\"Unsupported file format:", "as r: if r.status == 200: f = await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" )", "\"id\": str(location_id), \"first\": 50, \"after\": str(end_cursor) } ) for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await", "hashtags=[], locations=[]): \"\"\" Generates a feed based on the passed parameters. Multiple parameters", "has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\": 50, \"after\": end_cursor}", "logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) class Piggy: def __init__(self,", "\"after\": end_cursor} ) for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q,", "# Add the \"explore\" feed to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add", "self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many requests! Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return", "50, \"after\": str(end_cursor) } ) for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def", "caption) logger.info(f\"Tags: {tags}\") else: return await self.save_to_database(id, type, height, width, url, tags) async", "media from the generated feed. 
\"\"\" # Initialize asynchronous queue where the feed", "await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS comments ( id INTEGER, ts", "await db.execute( \"UPDATE users SET follower=0 WHERE username=?\", (username,) ) for username in", "is_video = media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass", "media[\"__typename\"] != \"GraphImage\" or await self.pic_already_saved(id): return height = media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"]", "queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media from the given users", "async def close(self): logger.info(\"\\nClosing session...\") # Close the http session await self.session.close() async", "= await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"]", "f: comments = f.readlines() self.video_comments_list = [x.strip() for x in comments] # Initialize", "params=\"__a:1\" ) return json.loads( regex.findall( r\"<script[^>]*>window._sharedData = (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0],", "res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( {\"tag_name\": hashtag, \"first\":", "await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS pics ( id INT, height", "r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media): id", ") await db.commit() async def backup(self): while 1: logger.info(\"Backing up database...\") for table_name", "= logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter = logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter =", "db: rows = await db.execute( f\"SELECT * FROM '{table_name}'\" ) header = [i[0]", "CREATE TABLE IF NOT EXISTS likes ( id INTEGER, ts INTEGER ) \"\"\"", "response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] =", "return if self.settings[\"like\"][\"rate\"] / 100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async", "users WHERE id=?\", (id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users SET ts_following=?,", "def like(self, media): \"\"\" Check if the media satisfy the prerequisites and eventually", "the feed. 
users: [List of usernames] Their media will be pulled and added", "} res = await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]:", "import aiosqlite import aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError from piggy import", "fh.setLevel(logging.DEBUG) formatter = logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" )", "logger.info(\"Already liked!\") return try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if", "except IndexError: pass else: if len(caption) > 100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\") async def", "not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]): logger.info(\"Wrong media type. Not liked!\") return likes = media[\"edge_liked_by\"][\"count\"]", "while not q.empty(): yield await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params", "user = await self.get_user_by_usernameUsername(user) id = user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\":", "\"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\", headers=headers, data=payload ) async with aiosqlite.connect(\"./piggy.db\")", "parameters can be passed at the same time. Args: explore: [Bool] If True", "= media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else:", "or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or too few likes. 
Not liked!\") return", "import random, randint import asyncio import aiohttp import aiosqlite import aiofiles import regex", "for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _hashtag_feed(self, q, hashtag): count =", "= regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\") else: return await self.save_to_database(id, type, height, width, url,", "formatter = logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter)", "if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if", "async def save_to_database(self, id, type, height, width, url, tags): tags = json.dumps(tags) async", "has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50,", "i in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name,", "log in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] # Initialize the database await", "db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\", (id, int(time.time()), comment) ) await db.commit()", "self.settings['connection'][\"wait_time\"] > 0: self.settings['connection'][\"wait_time\"] -= 1 if response_type == \"text\": res = await", "db.execute( \"UPDATE users SET following=1 WHERE username=?\", (username,) ) await db.commit() async def", "{ \"id\": str(location_id), \"first\": 50, \"after\": str(end_cursor) } ) for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]:", "if await row.fetchone() is None: return False else: return True async def save_to_database(self,", "params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\":", "many requests! Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method, url, headers=headers,", "\"GraphVideo\" else: mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return", "NOT EXISTS users ( id TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER, follower", "the prerequisites and eventually it will send a like. 
Args: media: The media", "payload = { \"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\", headers=headers, data=payload )", "for username in await self.followers(): await db.execute( \"UPDATE users SET follower=0 WHERE username=?\",", "= res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for", "res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _hashtag_feed(self, q, hashtag): count = 0 params =", "\"GET\": r = await self.session.get( url, headers=headers, params=params ) logger.debug(f\"[GET] {r.url}\") elif method", "\"\"\" ) logger.debug(\"Checking table: users\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS", "aiohttp import aiosqlite import aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError from piggy", "likes WHERE id=?\", (media[\"id\"],) ) if await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype", "for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _user_feed(self, q, user): user =", "len(hashtags): # Add all the media from the given hashtags to the queue", "as f: comments = f.readlines() self.pic_comments_list = [x.strip() for x in comments] #", "media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row =", "ClientConnectorError: logger.error(\"Could not reach the server. Retrying in 30 seconds.\") await asyncio.sleep(30) return", "self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"]", "in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >=", "= loop async def http_request( self, method, url, headers=None, params=None, data=None, response_type=\"text\" ):", "id INTEGER, ts INTEGER, comment TEXT ) \"\"\" ) logger.info(\"Updating followers and following", "end_cursor = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": id, \"first\": 50, \"after\": end_cursor} )", "data=payload ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\",", "The media to comment. 
Retruns: None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if", "in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] # Initialize the database await self._init_database()", "\"X-CSRFToken\": self.csrf_token } res = await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" )", "if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if mediatype == \"GraphImage\" or mediatype ==", "res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor", "WHERE id=?\", (id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users SET ts_following=?, following=?", "\"\"\" # Initialize asynchronous queue where the feed elements will be # temporarely", "media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags = []", "return if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if mediatype == \"GraphImage\" or mediatype", "of the user to be followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100", "a follow request. Args: media: The media of the user to be followed.", "logger.info(\"Backing up database...\") for table_name in [\"users\", \"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async with", "IF NOT EXISTS users ( id TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER,", "\"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page = True while has_next_page:", "comments list for videos with open(\"comments/video_comments.txt\") as f: comments = f.readlines() self.video_comments_list =", "asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the media from the given hashtags", "= f.readlines() self.pic_comments_list = [x.strip() for x in comments] # Load comments list", "login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] } headers = { \"User-Agent\":", "[List of usernames] Their media will be pulled and added to the feed.", "} payload = { \"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\", headers=headers, data=payload", "all the media from the given locations to the queue for location in", "res = await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged", "locations will be added to the feed. 
Retruns: Yields a media from the", "if len(locations): # Add all the media from the given locations to the", "== 429: # Unsuccessfull request: increase retry time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too", "len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else: logger.info(\"Not commented!\") async def _comment(self, id, comment,", "await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id, height, width, url, tags) ) await", "import regex from aiohttp.client_exceptions import ClientConnectorError from piggy import utils # Logging logger", "async def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a feed based on", "EXISTS comments ( id INTEGER, ts INTEGER, comment TEXT ) \"\"\" ) logger.info(\"Updating", "logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except", ") for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives", "await self.session.get( url, headers=headers, params=params ) logger.debug(f\"[GET] {r.url}\") elif method == \"POST\": r", "\"variables\": json.dumps({\"id\": str(location_id), \"first\": 50}) } has_next_page = True while has_next_page: res =", "self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count", "{\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return", "while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page =", "res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor", "\"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session", "VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request sent!\") async", "\"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers )", "db.execute( f\"SELECT * FROM '{table_name}'\" ) header = [i[0] for i in rows.description]", "logger.warning( f\"\"\"Too many requests! 
Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method,", "self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/follow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\")", "params[\"variables\"] = json.dumps( { \"id\": str(location_id), \"first\": 50, \"after\": str(end_cursor) } ) for", "logger.info(\"Loading settings...\") # Load settings with open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\",", "follow(self, media): \"\"\" Check if the media satisfy the prerequisites and eventually send", "= { \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page = True while has_next_page:", "followers(self, username=None): followers = [] if username is None: id = self.id else:", "TimeoutError: return False async def pic_already_saved(self, id): logger.debug(\"Checking database.\") async with aiosqlite.connect(\"./piggy.db\") as", "locations: [List of locations ids] Media with those locations will be added to", "Load comments list for photos with open(\"comments/pic_comments.txt\") as f: comments = f.readlines() self.pic_comments_list", "if await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype = media[\"__typename\"] except KeyError: is_video", "\"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } payload = { \"comment_text\": comment } await", "None: logger.info(\"Already commented.\") return try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"]", "f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError:", "else: return False except TimeoutError: return False async def pic_already_saved(self, id): logger.debug(\"Checking database.\")", "async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id, height,", "params=params, data=data, response_type=response_type ) else: logger.debug(f\"Status code: {r.status} {r.reason}\") if r.status == 200:", "= media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\"", "logger.debug(\"Checking table: comments\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS comments (", "disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute(", "or mediatype == \"GraphSidecar\": comment = self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ] else: comment =", "except IndexError: tags = [] pass else: if await self.download_pic(url, id, format): logger.info(f\"Caption:", "await f.write(await r.read()) await f.close() return True else: return False except TimeoutError: return", "media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a visual", "1 logger.warning( f\"\"\"Too many requests! 
Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request(", "\"first\": count}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\",", "as session: try: async with session.get(url) as r: if r.status == 200: f", "if self.settings[\"like\"][\"rate\"] / 100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def", "comments\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS comments ( id INTEGER,", "if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT *", "following BOOL ) \"\"\" ) logger.debug(\"Checking table: likes\") await db.execute( \"\"\" CREATE TABLE", "locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding media while more is loaded while", "self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\")", "self.session.close() async def get_user_by_username(self, username): res = await self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\", params=\"__a:1\" )", "{comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption) >", "200: f = await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read()) await f.close()", "username): res = await self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\", params=\"__a:1\" ) return json.loads( regex.findall( r\"<script[^>]*>window._sharedData", "comment) ) await db.commit() logger.info(\"Comment posted!\") async def follow(self, media): \"\"\" Check if", "async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO likes WHERE id=?\", (id,)) await", "await db.execute( \"SELECT * FROM likes WHERE id=?\", (media[\"id\"],) ) if await row.fetchone():", "{ \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } res = await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers,", "http_request( self, method, url, headers=None, params=None, data=None, response_type=\"text\" ): await asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if", "logger.debug(\"Checking table: pics\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS pics (", "logger.info(\"Not followed!\") async def _follow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\",", "return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too", "comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else: logger.info(\"Not commented!\") async", "regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\" or await self.pic_already_saved(id): return height = media[\"dimensions\"][\"height\"]", "NOT EXISTS pics ( id INT, height INT, width INT, url TEXT, tags", "(media[\"id\"],) ) if await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype = media[\"__typename\"] except", "= await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] 
params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\":", "hashtag): count = 0 params = { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\":", "in comments] # Initialize the asynchronous http session headers = { \"DNT\": \"1\",", "* FROM '{table_name}'\" ) header = [i[0] for i in rows.description] rows =", "= f.readlines() self.video_comments_list = [x.strip() for x in comments] # Initialize the asynchronous", "media): \"\"\" Check if the media satisfy the prerequisites and eventually send a", "await db.execute(\"INSERT INTO likes WHERE id=?\", (id,)) await db.commit() logger.info(\"Unliked!\") async def comment(self,", "like. Args: media: The media to like. Retruns: None \"\"\" # Check if", "self.following(): await db.execute( \"UPDATE users SET following=1 WHERE username=?\", (username,) ) await db.commit()", "= logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch)", ") async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id,", "or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if mediatype", "self.id = res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request(", "logger.info(\"Updating followers and following lists.\") await db.execute(\"UPDATE users SET follower=0, following=1\") for username", "💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption)", "res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info(", "pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]): logger.info(\"Wrong media type. Not liked!\") return", "logger.debug(\"Checking database.\") async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT *", "self.csrf_token = await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): # Get login page and find", "await db.execute( f\"SELECT * FROM '{table_name}'\" ) header = [i[0] for i in", "\"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session =", "CREATE TABLE IF NOT EXISTS comments ( id INTEGER, ts INTEGER, comment TEXT", "def download(self, media): id = media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0]", "Media with those locations will be added to the feed. Retruns: Yields a", "self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log in.\") cookies =", "has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( {\"tag_name\":", "will send a comment. 
Args: media: The media to comment. Retruns: None \"\"\"", "media: The media to be printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype =", "res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a visual representation of", "else: return await self.save_to_database(id, type, height, width, url, tags) async def download_pic(self, url,", "return False async def pic_already_saved(self, id): logger.debug(\"Checking database.\") async with aiosqlite.connect(\"./piggy.db\") as db:", "\"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def following(self,", "True async def save_to_database(self, id, type, height, width, url, tags): tags = json.dumps(tags)", "Load settings with open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE)", "given users to the queue for user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags):", "while more is loaded while 1: while not q.empty(): yield await q.get() await", "media. Args: media: The media to be printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try:", "likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments", "= res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\":", "None, int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request sent!\") async def unfollow(self,", "db.execute( \"\"\" CREATE TABLE IF NOT EXISTS comments ( id INTEGER, ts INTEGER,", "Gives a visual representation of a media. 
Args: media: The media to be", "already been liked async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT", "likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments", "def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a feed based on the", "needed to log in self.csrf_token = await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): # Get", "params[\"variables\"] = json.dumps( {\"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await", "return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too", "total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\") # Get the csrf token.", "await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): # Get login page and find the csrf", "all the media from the given hashtags to the queue for hashtag in", "json.dumps({\"first\": 24}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\",", "= media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags = [] pass else: if await self.download_pic(url, id,", "satisfy the prerequisites and eventually send a follow request. Args: media: The media", "def backup(self): while 1: logger.info(\"Backing up database...\") for table_name in [\"users\", \"likes\", \"comments\"]:", "= media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption) > 100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\")", "200: # Successfull request: decrease retry time if self.settings['connection'][\"wait_time\"] > 0: self.settings['connection'][\"wait_time\"] -=", "location)) # Keep on yielding media while more is loaded while 1: while", "if r.status == 200: f = await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await", "\"\", f.read(), flags=regex.MULTILINE) ) # Load comments list for photos with open(\"comments/pic_comments.txt\") as", "logging.Formatter(\"%(message)s\") ch.setFormatter(formatter) formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh)", ") logger.debug(f\"[POST] {r.url}\") else: raise ValueError(f\"Invalid HTTP method: {method}\") except ClientConnectorError: logger.error(\"Could not", "res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor", "ids] Media with those locations will be added to the feed. Retruns: Yields", "def print(self, media): \"\"\" Gives a visual representation of a media. 
Args: media:", "\"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/follow/\", headers=headers ) async", "with open(\"comments/video_comments.txt\") as f: comments = f.readlines() self.video_comments_list = [x.strip() for x in", "await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time())) ) await db.commit() logger.info(\"Liked!\") async", "followed!\") async def _follow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\":", "logger.debug(\"Checking table: users\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS users (", "added to to the feed. users: [List of usernames] Their media will be", "explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a feed based on the passed parameters.", "return False else: return True async def save_to_database(self, id, type, height, width, url,", "await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"]", "self.pic_comments_list = [x.strip() for x in comments] # Load comments list for videos", "comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. Not liked!\") return if", "await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/follow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: c =", "self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name, header, rows) else: logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\"", "media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"]", "database await self._init_database() async def _init_database(self): logger.info(\"Checking database...\") # Connect to the local", "caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags = [] pass else: if await self.download_pic(url,", "reply_to_id=None): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token }", "if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"]", "self.csrf_token } payload = { \"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\", headers=headers,", "\"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async with aiosqlite.connect(\"./piggy.db\") as db: rows = await db.execute(", "self.followers(): await db.execute( \"UPDATE users SET follower=0 WHERE username=?\", (username,) ) for username", "json.loads( regex.findall( r\"<script[^>]*>window._sharedData = (.*?)</script>\", regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] #", 
"headers=headers, data=payload ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments", "[List of hastags] Media with those hashtags will be added to the feed.", "from piggy import utils # Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler()", "in [\"users\", \"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async with aiosqlite.connect(\"./piggy.db\") as db: rows =", "Args: explore: [Bool] If True the explore page will be added to to", "len(locations): # Add all the media from the given locations to the queue", "self.pic_already_saved(id): return height = media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except", "response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id),", "The media to be printed. Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"]", "await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(", "prerequisites and eventually it will send a comment. Args: media: The media to", "# Load settings with open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(),", "res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor", "import asyncio import aiohttp import aiosqlite import aiofiles import regex from aiohttp.client_exceptions import", "format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\" or await self.pic_already_saved(id): return height", "is_video: mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass else: if not mediatype", "TABLE IF NOT EXISTS comments ( id INTEGER, ts INTEGER, comment TEXT )", "id) ) else: await db.execute( \"INSERT INTO users VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False,", "\"\"\" if self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async", "\"UPDATE users SET following=false WHERE id=?\", (id,) ) await db.commit() async def backup(self):", "async def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") # Load settings with open(settings_path) as f:", "await q.put(media[\"node\"]) async def _user_feed(self, q, user): user = await self.get_user_by_usernameUsername(user) id =", "Args: media: The media to like. Retruns: None \"\"\" # Check if the", "comments. Not liked!\") return if self.settings[\"like\"][\"rate\"] / 100 > random(): await self._like(media[\"id\"]) else:", "async def print(self, media): \"\"\" Gives a visual representation of a media. 
Args:", "TABLE IF NOT EXISTS likes ( id INTEGER, ts INTEGER ) \"\"\" )", "{r.status}\") async def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") # Load settings with open(settings_path) as", "asyncio.ensure_future(self._hashtag_feed(q, hashtag)) if len(locations): # Add all the media from the given locations", "self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } payload = { \"comment_text\": comment } await self.http_request( \"POST\",", "randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else: logger.info(\"Not commented!\") async def _comment(self, id,", "table: comments\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS comments ( id", "self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"UPDATE", "It is needed to log in self.csrf_token = await self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self):", "pass likes = media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await", "/ 100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id):", "f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️ {likes}, 💬", "db.execute(\"INSERT INTO likes WHERE id=?\", (id,)) await db.commit() logger.info(\"Unliked!\") async def comment(self, media):", "100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self, id): headers", "f: comments = f.readlines() self.pic_comments_list = [x.strip() for x in comments] # Load", "= \"GraphVideo\" else: mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]):", "eventually it will send a like. Args: media: The media to like. Retruns:", "if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"] == \"json\": await", "with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\", (id, int(time.time()), comment)", "too few comments. Not liked!\") return if self.settings[\"like\"][\"rate\"] / 100 > random(): await", "{tags}\") else: return await self.save_to_database(id, type, height, width, url, tags) async def download_pic(self,", "media): \"\"\" Gives a visual representation of a media. 
Args: media: The media", "in locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding media while more is loaded", "pics\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS pics ( id INT,", "db: row = await db.execute( \"SELECT * FROM pics WHERE id=?\", (id,) )", "url, headers=headers, params=params ) logger.debug(f\"[GET] {r.url}\") elif method == \"POST\": r = await", ") await db.commit() async def followers(self, username=None): followers = [] if username is", "(id,) ) await db.commit() async def backup(self): while 1: logger.info(\"Backing up database...\") for", "setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") # Load settings with open(settings_path) as f: self.settings =", "retry time if self.settings['connection'][\"wait_time\"] > 0: self.settings['connection'][\"wait_time\"] -= 1 if response_type == \"text\":", "in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header,", "INTO comments VALUES(?,?,?)\", (id, int(time.time()), comment) ) await db.commit() logger.info(\"Comment posted!\") async def", "in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the media from the", "= media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\",", "feed to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media from", "be # temporarely stored q = asyncio.Queue() if explore: # Add the \"explore\"", "\"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _user_feed(self, q,", "\"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while has_next_page:", "100 <= random(): if mediatype == \"GraphImage\" or mediatype == \"GraphSidecar\": comment =", "settings with open(settings_path) as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) )", "can be passed at the same time. Args: explore: [Bool] If True the", "= media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags =", "self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db:", "f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"UPDATE users SET", "Yields a media from the generated feed. 
\"\"\" # Initialize asynchronous queue where", "int(time.time()), False, True) ) await db.commit() logger.info(\"Follow request sent!\") async def unfollow(self, id):", "\"\"\" # Check if the media has already been liked async with aiosqlite.connect(\"./piggy.db\")", "{r.headers}\") logger.error(await r.text()) raise ValueError(f\"Response error: {r.status}\") async def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\")", "else: logger.error(\"Couldn't log in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] # Initialize the", "end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( {\"tag_name\": hashtag, \"first\": count,", "await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype = media[\"__typename\"] except KeyError: is_video =", "True while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page", "self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db:", "while 1: while not q.empty(): yield await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self,", "= cookies[\"csrftoken\"] # Initialize the database await self._init_database() async def _init_database(self): logger.info(\"Checking database...\")", "mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"] comments =", "rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows) elif", "in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\"", ") fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) class Piggy: def __init__(self, loop): self.loop = loop async", "id): logger.debug(\"Checking database.\") async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT", "for photos with open(\"comments/pic_comments.txt\") as f: comments = f.readlines() self.pic_comments_list = [x.strip() for", "the media satisfy the prerequisites and eventually it will send a comment. Args:", "explore page will be added to to the feed. users: [List of usernames]", "= { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page = True", "\"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers", "the media has already been liked async with aiosqlite.connect(\"./piggy.db\") as db: row =", "aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read()) await f.close() return True else: return", "flags=regex.MULTILINE )[0] async def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] }", "await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log in.\") cookies", "to the feed. 
locations: [List of locations ids] Media with those locations will", "table: users\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS users ( id", "{ \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] } headers = { \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token", "header = [i[0] for i in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"]", "= [x.strip() for x in comments] # Initialize the asynchronous http session headers", "await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id),", "db.execute( \"SELECT * FROM comments WHERE id=?\", (media[\"id\"],) ) if await row.fetchone() is", "db: row = await db.execute( \"SELECT * FROM comments WHERE id=?\", (media[\"id\"],) )", "with those locations will be added to the feed. Retruns: Yields a media", "a media from the generated feed. \"\"\" # Initialize asynchronous queue where the", "\"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps(", "= 0 params = { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) }", "= media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass else:", "id=?\", (media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already commented.\") return try: mediatype", "for videos with open(\"comments/video_comments.txt\") as f: comments = f.readlines() self.video_comments_list = [x.strip() for", "logger.debug(res) return res elif response_type == \"json\": res = await r.json() logger.debug(res) return", "if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row", "= await self.session.post( url, headers=headers, data=data ) logger.debug(f\"[POST] {r.url}\") else: raise ValueError(f\"Invalid HTTP", "logger.error(\"Could not reach the server. Retrying in 30 seconds.\") await asyncio.sleep(30) return await", "hashtags will be added to the feed. locations: [List of locations ids] Media", "or too few comments. 
Not liked!\") return if self.settings[\"like\"][\"rate\"] / 100 > random():", "= self.id else: user = await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = {", "mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"]", "50}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\",", "end_cursor} ) for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q, location_id):", "= await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log in.\")", "await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self, id): headers = { \"DNT\":", "it will send a comment. Args: media: The media to comment. Retruns: None", "username=?\", (username,) ) for username in await self.following(): await db.execute( \"UPDATE users SET", "cookies[\"csrftoken\"] # Initialize the database await self._init_database() async def _init_database(self): logger.info(\"Checking database...\") #", "liked!\") return try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if is_video:", "if username is None: id = self.id else: user = await self.get_user_by_username(username) id", "prerequisites and eventually it will send a like. Args: media: The media to", "self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\")", "else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes <", ") async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"UPDATE users SET following=false WHERE", "has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50,", "{\"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def", "res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log", "'{table_name}'\" ) header = [i[0] for i in rows.description] rows = await rows.fetchall()", "a feed based on the passed parameters. Multiple parameters can be passed at", "locations ids] Media with those locations will be added to the feed. 
Retruns:", "download(self, media): id = media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if", "Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG)", "[i[0] for i in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\":", "try: if method == \"GET\": r = await self.session.get( url, headers=headers, params=params )", "logger.error(await r.text()) raise ValueError(f\"Response error: {r.status}\") async def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") #", "for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async def following(self, username=None): following =", "to to the feed. users: [List of usernames] Their media will be pulled", "= media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments =", "media: The media of the user to be followed. Retruns: None \"\"\" if", "response_type == \"text\": res = await r.text() logger.debug(res) return res elif response_type ==", "rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows)", "media from the given hashtags to the queue for hashtag in hashtags: asyncio.ensure_future(self._hashtag_feed(q,", "media: The media to like. Retruns: None \"\"\" # Check if the media", "self, method, url, headers=None, params=None, data=None, response_type=\"text\" ): await asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if method", "db.commit() logger.info(\"Follow request sent!\") async def unfollow(self, id): return async def _unfollow(self, id):", "res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\": 50, \"after\": end_cursor} ) for", "caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption) > 100: logger.info(f\"{caption:.100}...\") else:", "else: logger.info(f\"{caption}\") async def like(self, media): \"\"\" Check if the media satisfy the", "has already been liked async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute(", "async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO comments VALUES(?,?,?)\", (id, int(time.time()),", "feed. 
\"\"\" # Initialize asynchronous queue where the feed elements will be #", ">= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >=", "else: await db.execute( \"INSERT INTO users VALUES(?,?,?,?,?)\", (id, None, int(time.time()), False, True) )", "\"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while has_next_page:", "\"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers,", "ts INTEGER, comment TEXT ) \"\"\" ) logger.info(\"Updating followers and following lists.\") await", "\"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async def feed(self,", "same time. Args: explore: [Bool] If True the explore page will be added", "self.id else: user = await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\":", "piggy import utils # Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh", "True else: return False except TimeoutError: return False async def pic_already_saved(self, id): logger.debug(\"Checking", "def _init_database(self): logger.info(\"Checking database...\") # Connect to the local database and look for", "the server. Retrying in 30 seconds.\") await asyncio.sleep(30) return await self.http_request( method, url,", "res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user", "= media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or", "await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params = { \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\",", "count = 0 params = { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count})", "with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await db.execute( \"\"\" CREATE TABLE IF", "to the queue for user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add", "= await db.execute( \"SELECT * FROM comments WHERE id=?\", (media[\"id\"],) ) if await", "satisfy the prerequisites and eventually it will send a like. Args: media: The", "media has already been liked async with aiosqlite.connect(\"./piggy.db\") as db: row = await", "res, flags=regex.MULTILINE )[0] async def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"]", "users SET ts_following=?, following=? WHERE id=? 
\"\"\", (int(time.time()), True, id) ) else: await", "response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id),", "import json import time from random import random, randint import asyncio import aiohttp", "res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor}", "await q.put(media[\"node\"]) async def _hashtag_feed(self, q, hashtag): count = 0 params = {", "queue for user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the", "async def _location_feed(self, q, location_id): count = 0 params = { \"query_hash\": \"1b84447a4d8b6d6d0426fefb34514485\",", "def _explore_feed(self, q): params = { \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page", "logger.info(\"Not commented!\") async def _comment(self, id, comment, reply_to_id=None): headers = { \"DNT\": \"1\",", "as db: rows = await db.execute( f\"SELECT * FROM '{table_name}'\" ) header =", "\"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1", "Not liked!\") return if self.settings[\"like\"][\"rate\"] / 100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not", "= aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\") # Get the csrf token. It is needed", "following(self, username=None): following = [] if username is None: id = self.id else:", "self.get_user_by_usernameUsername(user) id = user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\":", "< self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. 
Not", "as db: row = await db.execute( \"SELECT * FROM pics WHERE id=?\", (id,)", "for table_name in [\"users\", \"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async with aiosqlite.connect(\"./piggy.db\") as db:", "likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many", "self._getCsrfTokenFromForm() async def _getCsrfTokenFromForm(self): # Get login page and find the csrf token", "local database and look for the table names async with aiosqlite.connect(\"./piggy.db\") as db:", "id, \"first\": 24}) } has_next_page = True while has_next_page: res = await self.http_request(", "randint(0, len(self.pic_comments_list)-1) ] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment)", "( id INT, height INT, width INT, url TEXT, tags TEXT ) \"\"\"", "ts_follower INTEGER, ts_following INTEGER, follower BOOL, following BOOL ) \"\"\" ) logger.debug(\"Checking table:", "# Close the http session await self.session.close() async def get_user_by_username(self, username): res =", "= { \"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\", headers=headers, data=payload ) async", "\"\"\" UPDATE users SET ts_following=?, following=? WHERE id=? \"\"\", (int(time.time()), True, id) )", "to the feed. users: [List of usernames] Their media will be pulled and", "= await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\":", "commented.\") return try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"] if is_video:", "= media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username =", "Retrying in {self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method, url, headers=headers, params=params, data=data,", "aiohttp.client_exceptions import ClientConnectorError from piggy import utils # Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG)", "\"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) } has_next_page = True while has_next_page: res =", "generated feed. \"\"\" # Initialize asynchronous queue where the feed elements will be", "headers=headers, params=params, data=data, response_type=response_type ) else: logger.error(f\"Response status: {r.status}\") logger.error(f\"Response headers: {r.headers}\") logger.error(await", "{\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return", "params=params, data=data, response_type=response_type ) else: logger.error(f\"Response status: {r.status}\") logger.error(f\"Response headers: {r.headers}\") logger.error(await r.text())", "the passed parameters. Multiple parameters can be passed at the same time. 
Args:", "with aiosqlite.connect(\"./piggy.db\") as db: rows = await db.execute( f\"SELECT * FROM '{table_name}'\" )", "await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"]", "logger.error(res) else: logger.error(\"Couldn't log in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] # Initialize", "json.dumps( {\"id\": id, \"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await", "\"json\": res = await r.json() logger.debug(res) return res else: raise ValueError(f\"Invalid response type:", "the given users to the queue for user in users: asyncio.ensure_future(self._user_feed(q, user)) if", "\"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def", "else: logger.info(\"Not commented!\") async def _comment(self, id, comment, reply_to_id=None): headers = { \"DNT\":", "else: return True async def save_to_database(self, id, type, height, width, url, tags): tags", "VALUES(?,?,?)\", (id, int(time.time()), comment) ) await db.commit() logger.info(\"Comment posted!\") async def follow(self, media):", "logger.addHandler(fh) class Piggy: def __init__(self, loop): self.loop = loop async def http_request( self,", "has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"]", "if await self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\")", "\"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\",", "aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError from piggy import utils # Logging", "await self.save_to_database(id, type, height, width, url, tags) async def download_pic(self, url, id, format):", "res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor", "for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a", "be followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"])", "INTEGER, ts INTEGER ) \"\"\" ) logger.debug(\"Checking table: comments\") await db.execute( \"\"\" CREATE", "{r.status}\") logger.error(f\"Response headers: {r.headers}\") logger.error(await r.text()) raise ValueError(f\"Response error: {r.status}\") async def setup(self,", "\"\"\" Generates a feed based on the passed parameters. Multiple parameters can be", "media will be pulled and added to the feed. 
hashtags: [List of hastags]", "likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or too few likes. Not liked!\") return comments", "* FROM users WHERE id=?\", (id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users", "type. Not liked!\") return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes", "session headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] }", "async def login(self): payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] } headers =", "more is loaded while 1: while not q.empty(): yield await q.get() await asyncio.sleep(1e-12)", "= res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( { \"id\":", "async def get_user_by_username(self, username): res = await self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\", params=\"__a:1\" ) return", "self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else: logger.info(\"Not commented!\") async def _comment(self,", "media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or too", "= { \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) } has_next_page = True", ") await db.commit() logger.info(\"Comment posted!\") async def follow(self, media): \"\"\" Check if the", "TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL, following BOOL ) \"\"\"", "regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load comments list for photos with open(\"comments/pic_comments.txt\")", "# Initialize the asynchronous http session headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\",", "IF NOT EXISTS comments ( id INTEGER, ts INTEGER, comment TEXT ) \"\"\"", "as f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load comments", "with open(\"comments/pic_comments.txt\") as f: comments = f.readlines() self.pic_comments_list = [x.strip() for x in", "media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else: mediatype = \"GraphImage\" pass likes =", "logger.info(\"Session initialized.\") # Get the csrf token. It is needed to log in", "else: user = await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\",", "self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self, id): headers = { \"DNT\": \"1\",", "= await db.execute( f\"SELECT * FROM '{table_name}'\" ) header = [i[0] for i", "users SET following=false WHERE id=?\", (id,) ) await db.commit() async def backup(self): while", "stored q = asyncio.Queue() if explore: # Add the \"explore\" feed to the", "\"first\": 50}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\",", "id=?\", (id,) ) await db.commit() async def backup(self): while 1: logger.info(\"Backing up database...\")", "and eventually it will send a like. 
Args: media: The media to like.", "if the media satisfy the prerequisites and eventually it will send a like.", "the table names async with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await db.execute(", "will be # temporarely stored q = asyncio.Queue() if explore: # Add the", "= user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) }", "headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await", "random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self, id): headers = {", "location in locations: asyncio.ensure_future(self._location_feed(q, location)) # Keep on yielding media while more is", "row.fetchone() is None: return False else: return True async def save_to_database(self, id, type,", "# Load comments list for videos with open(\"comments/video_comments.txt\") as f: comments = f.readlines()", "* FROM pics WHERE id=?\", (id,) ) if await row.fetchone() is None: return", "session await self.session.close() async def get_user_by_username(self, username): res = await self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\",", "100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\") async def like(self, media): \"\"\" Check if the media", "in await self.followers(): await db.execute( \"UPDATE users SET follower=0 WHERE username=?\", (username,) )", "asyncio import aiohttp import aiosqlite import aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError", "added to the feed. hashtags: [List of hastags] Media with those hashtags will", "on the passed parameters. Multiple parameters can be passed at the same time.", ") await db.commit() logger.info(\"Liked!\") async def _unlike(self, id): headers = { \"DNT\": \"1\",", "be added to the feed. locations: [List of locations ids] Media with those", "follow request. Args: media: The media of the user to be followed. Retruns:", "regex.findall( r\"<body[^>]*>(.*)</body>\", res, flags=regex.DOTALL )[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media):", "res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\\n❤️ {likes}, 💬 {comments}\" ) try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"]", "id TEXT, username TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL, following BOOL )", "url, headers=None, params=None, data=None, response_type=\"text\" ): await asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if method == \"GET\":", "username in await self.followers(): await db.execute( \"UPDATE users SET follower=0 WHERE username=?\", (username,)", "comments] # Initialize the asynchronous http session headers = { \"DNT\": \"1\", \"Host\":", "\"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while has_next_page: res =", "a like. Args: media: The media to like. 
Retruns: None \"\"\" # Check", "await asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if method == \"GET\": r = await self.session.get( url, headers=headers,", "q.empty(): yield await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params = {", "elif response_type == \"json\": res = await r.json() logger.debug(res) return res else: raise", "logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def close(self):", "id): return async def _unfollow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\",", "self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type ) else: logger.debug(f\"Status code: {r.status} {r.reason}\")", "logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row = await", "followers.append(user[\"node\"][\"username\"]) return followers async def following(self, username=None): following = [] if username is", "await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS likes ( id INTEGER, ts", "{self.settings['connection']['wait_time']} seconds.\"\"\" ) return await self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type )", "and find the csrf token res = await self.http_request( \"GET\", \"https://www.instagram.com/accounts/login/\" ) return", "async with aiosqlite.connect(\"./piggy.db\") as db: c = await db.execute(\"SELECT * FROM users WHERE", "q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params = { \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\":", "self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]:", "the prerequisites and eventually it will send a comment. Args: media: The media", ") logger.error(res) else: logger.error(\"Couldn't log in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] #", "the feed. locations: [List of locations ids] Media with those locations will be", "res else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status == 429: # Unsuccessfull", "\"\"\" Gives a visual representation of a media. Args: media: The media to", "open(\"comments/pic_comments.txt\") as f: comments = f.readlines() self.pic_comments_list = [x.strip() for x in comments]", "import logging import json import time from random import random, randint import asyncio", "id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\") else: return await", "params=params ) logger.debug(f\"[GET] {r.url}\") elif method == \"POST\": r = await self.session.post( url,", "page will be added to to the feed. users: [List of usernames] Their", "csrf token. 
It is needed to log in self.csrf_token = await self._getCsrfTokenFromForm() async", "await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"] ==", "> 0: self.settings['connection'][\"wait_time\"] -= 1 if response_type == \"text\": res = await r.text()", "def follow(self, media): \"\"\" Check if the media satisfy the prerequisites and eventually", "await db.commit() logger.info(\"Follow request sent!\") async def unfollow(self, id): return async def _unfollow(self,", "asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if method == \"GET\": r = await self.session.get( url, headers=headers, params=params", "\"GraphSidecar\": comment = self.pic_comments_list[ randint(0, len(self.pic_comments_list)-1) ] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1)", "as db: c = await db.execute(\"SELECT * FROM users WHERE id=?\", (id,)) if", "in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self, media): \"\"\" Gives a visual representation", "utils.cookies_dict(self.session.cookie_jar) self.csrf_token = cookies[\"csrftoken\"] # Initialize the database await self._init_database() async def _init_database(self):", "\"after\": str(end_cursor) } ) for media in res[\"data\"][\"location\"][\"edge_location_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def print(self,", "params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\":", "24}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\",", "id=?\", (id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users SET ts_following=?, following=? 
WHERE", "[List of locations ids] Media with those locations will be added to the", "import aiofiles import regex from aiohttp.client_exceptions import ClientConnectorError from piggy import utils #", "\"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page = True while has_next_page: res = await", "the http session await self.session.close() async def get_user_by_username(self, username): res = await self.http_request(", "media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" )", "db.commit() async def backup(self): while 1: logger.info(\"Backing up database...\") for table_name in [\"users\",", "queue for hashtag in hashtags: asyncio.ensure_future(self._hashtag_feed(q, hashtag)) if len(locations): # Add all the", "INT, url TEXT, tags TEXT ) \"\"\" ) logger.debug(\"Checking table: users\") await db.execute(", "self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO", "return followers async def following(self, username=None): following = [] if username is None:", "the csrf token res = await self.http_request( \"GET\", \"https://www.instagram.com/accounts/login/\" ) return regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\",", "if len(hashtags): # Add all the media from the given hashtags to the", "self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"]", "for i in rows.description] rows = await rows.fetchall() if self.settings[\"backup\"][\"format\"] == \"csv\": await", "async def _init_database(self): logger.info(\"Checking database...\") # Connect to the local database and look", "if method == \"GET\": r = await self.session.get( url, headers=headers, params=params ) logger.debug(f\"[GET]", "locations=[]): \"\"\" Generates a feed based on the passed parameters. 
Multiple parameters can", "async def like(self, media): \"\"\" Check if the media satisfy the prerequisites and", "aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await db.execute( \"\"\" CREATE TABLE IF NOT", "logger.debug(f\"Status code: {r.status} {r.reason}\") if r.status == 200: # Successfull request: decrease retry", "comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100 <=", "\"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } payload = { \"comment_text\":", "f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read()) await f.close() return True else: return False", "WHERE id=?\", (media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already commented.\") return try:", "flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media): id = media[\"id\"] url =", "media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too", "data=None, response_type=\"text\" ): await asyncio.sleep(self.settings['connection'][\"wait_time\"]) try: if method == \"GET\": r = await", "the media satisfy the prerequisites and eventually it will send a like. Args:", "res = await r.json() logger.debug(res) return res else: raise ValueError(f\"Invalid response type: {response_type}\")", "payload = { \"username\": self.settings[\"user\"][\"username\"], \"password\": self.settings[\"user\"][\"password\"] } headers = { \"User-Agent\": self.settings[\"connection\"][\"user_agent\"],", "The media to like. 
Retruns: None \"\"\" # Check if the media has", "self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\")", "\"csv\": await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name, header, rows)", "= media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\"", "row.fetchone() is None: logger.info(\"Already commented.\") return try: mediatype = media[\"__typename\"] except KeyError: is_video", "\"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if", "import utils # Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh =", "url, id, format): logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session: try: async with", "\"GraphImage\" or await self.pic_already_saved(id): return height = media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption", "return True async def save_to_database(self, id, type, height, width, url, tags): tags =", "aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session initialized.\") # Get the csrf", "await row.fetchone() is None: logger.info(\"Already commented.\") return try: mediatype = media[\"__typename\"] except KeyError:", "_unfollow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token", "to the queue for hashtag in hashtags: asyncio.ensure_future(self._hashtag_feed(q, hashtag)) if len(locations): # Add", "self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def _follow(self,", "tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO pics", "= res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"first\": 50, \"after\": end_cursor} ) for media in", "save_to_database(self, id, type, height, width, url, tags): tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\")", "] else: comment = self.video_comments_list[ randint(0, len(self.video_comments_list)-1) ] await self._comment(media[\"id\"], comment) else: logger.info(\"Not", "json.dumps({\"id\": id, \"first\": 24}) } has_next_page = True while has_next_page: res = await", "logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO) fh.setLevel(logging.DEBUG) formatter = logging.Formatter(\"%(message)s\")", "await db.execute(\"SELECT * FROM users WHERE id=?\", (id,)) if c.rowcount: await db.execute( \"\"\"", "= media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\" or await self.pic_already_saved(id):", "following.append(user[\"node\"][\"username\"]) return following async def feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a", 
"media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] != \"GraphImage\" or", "= res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\",", "async with aiohttp.ClientSession() as session: try: async with session.get(url) as r: if r.status", "utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]): logger.info(\"Wrong media type. Not liked!\") return likes = media[\"edge_liked_by\"][\"count\"] if likes <", "f: self.settings = json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load comments list", "comments = f.readlines() self.pic_comments_list = [x.strip() for x in comments] # Load comments", "res elif response_type == \"json\": res = await r.json() logger.debug(res) return res else:", "location_id): count = 0 params = { \"query_hash\": \"1b84447a4d8b6d6d0426fefb34514485\", \"variables\": json.dumps({\"id\": str(location_id), \"first\":", "\"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\",", "\"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with", "aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"UPDATE users SET following=false WHERE id=?\", (id,) )", "async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time()))", "\"1\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout)", "user = await self.get_user_by_username(username) id = user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\":", "TEXT, ts_follower INTEGER, ts_following INTEGER, follower BOOL, following BOOL ) \"\"\" ) logger.debug(\"Checking", "def _follow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\":", "table names async with aiosqlite.connect(\"./piggy.db\") as db: logger.debug(\"Checking table: pics\") await db.execute( \"\"\"", "error: {r.status}\") async def setup(self, settings_path=\"settings.json\"): logger.info(\"Loading settings...\") # Load settings with open(settings_path)", "self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"]", "end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _user_feed(self, q, user):", "id INT, height INT, width INT, url TEXT, tags TEXT ) \"\"\" )", "TABLE IF NOT EXISTS users ( id TEXT, username TEXT, ts_follower INTEGER, ts_following", "will be added to the feed. 
locations: [List of locations ids] Media with", "return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT", "{\"id\": id, \"first\": 50, \"after\": end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"])", "res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": id, \"first\": 50, \"after\": end_cursor} ) for media", "WHERE id=?\", (media[\"id\"],) ) if await row.fetchone(): logger.info(\"Already liked!\") return try: mediatype =", "user[\"graphql\"][\"user\"][\"id\"] params = { \"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page", "\"POST\", f\"https://www.instagram.com/web/friendships/{id}/follow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: c = await db.execute(\"SELECT", "str(id), \"first\": 50}) } has_next_page = True while has_next_page: res = await self.http_request(", "(username,) ) await db.commit() async def followers(self, username=None): followers = [] if username", "headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } payload", "end_cursor} ) for media in res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _hashtag_feed(self, q, hashtag):", "Their media will be pulled and added to the feed. hashtags: [List of", "or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. Not liked!\") return", "id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token }", ") if await row.fetchone() is None: logger.info(\"Already commented.\") return try: mediatype = media[\"__typename\"]", "\"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_followed_by\"][\"edges\"]: followers.append(user[\"node\"][\"username\"]) return followers async", "Close the http session await self.session.close() async def get_user_by_username(self, username): res = await", "logger.addHandler(ch) logger.addHandler(fh) class Piggy: def __init__(self, loop): self.loop = loop async def http_request(", "\"GraphVideo\" else: mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"like\"][\"media_type\"]): logger.info(\"Wrong", "logger.debug(f\"[POST] {r.url}\") else: raise ValueError(f\"Invalid HTTP method: {method}\") except ClientConnectorError: logger.error(\"Could not reach", "if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id = res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\")", "id = media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\", url)[0] if media[\"__typename\"] !=", "_getCsrfTokenFromForm(self): # Get login page and find the csrf token res = await", "aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT * FROM pics WHERE id=?\",", "# Add all the media from the given hashtags to the queue for", "self.csrf_token } res = await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if", "WHERE 
username=?\", (username,) ) await db.commit() async def followers(self, username=None): followers = []", "username=?\", (username,) ) await db.commit() async def followers(self, username=None): followers = [] if", "url, tags): tags = json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT", "= json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load comments list for photos", "pics WHERE id=?\", (id,) ) if await row.fetchone() is None: return False else:", "= res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( {\"tag_name\": hashtag,", "pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes = media[\"edge_liked_by\"][\"count\"] if likes", "with those hashtags will be added to the feed. locations: [List of locations", "db.execute( \"\"\" CREATE TABLE IF NOT EXISTS pics ( id INT, height INT,", "id, comment, reply_to_id=None): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\":", "Retruns: None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async with aiosqlite.connect(\"./piggy.db\")", "feed(self, explore=True, users=[], hashtags=[], locations=[]): \"\"\" Generates a feed based on the passed", "SET ts_following=?, following=? WHERE id=? \"\"\", (int(time.time()), True, id) ) else: await db.execute(", "user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async def feed(self, explore=True, users=[], hashtags=[], locations=[]):", "self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count", "1: logger.info(\"Backing up database...\") for table_name in [\"users\", \"likes\", \"comments\"]: if self.settings[\"backup\"][table_name]: async", "height = media[\"dimensions\"][\"height\"] width = media[\"dimensions\"][\"width\"] try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: tags", ") \"\"\" ) logger.debug(\"Checking table: users\") await db.execute( \"\"\" CREATE TABLE IF NOT", "to be followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100 > random(): await", "self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db:", "else: logger.error(f\"Response status: {r.status}\") logger.error(f\"Response headers: {r.headers}\") logger.error(await r.text()) raise ValueError(f\"Response error: {r.status}\")", "logger.debug(res) return res else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status == 429:", "logger.info(\"Not liked!\") async def _like(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\",", "to comment. 
Retruns: None \"\"\" if media[\"comments_disabled\"]: logger.info(\"Comments disabled.\") return if self.settings[\"comment\"][\"only_once\"]: async", ") async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO likes WHERE id=?\", (id,))", "ValueError(f\"Invalid HTTP method: {method}\") except ClientConnectorError: logger.error(\"Could not reach the server. Retrying in", "logger.info(\"\\nClosing session...\") # Close the http session await self.session.close() async def get_user_by_username(self, username):", "\"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) } has_next_page = True while has_next_page:", "1 params[\"variables\"] = json.dumps( {\"tag_name\": hashtag, \"first\": count, \"after\": end_cursor} ) for media", "async def comment(self, media): \"\"\" Check if the media satisfy the prerequisites and", "params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\":", "str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following", "if the media satisfy the prerequisites and eventually send a follow request. Args:", "if self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\") async def", "{ \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\",", "db.commit() async def followers(self, username=None): followers = [] if username is None: id", "await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT", "f.write(await r.read()) await f.close() return True else: return False except TimeoutError: return False", "for user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the media", "aiosqlite.connect(\"./piggy.db\") as db: await db.execute(\"INSERT INTO likes WHERE id=?\", (id,)) await db.commit() logger.info(\"Unliked!\")", "\"a5164aed103f24b03e7b7747a2d94e3c\", \"variables\": json.dumps({\"id\": id, \"first\": 24}) } has_next_page = True while has_next_page: res", ") logger.debug(\"Checking table: likes\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS likes", "f.readlines() self.pic_comments_list = [x.strip() for x in comments] # Load comments list for", "\"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id = res[\"userId\"]", "users SET following=1 WHERE username=?\", (username,) ) await db.commit() async def followers(self, username=None):", "100 > random(): await self._like(media[\"id\"]) else: logger.info(\"Not liked!\") async def _like(self, id): headers", "the given locations to the queue for location in locations: asyncio.ensure_future(self._location_feed(q, location)) #", "has_next_page: res = await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = 
res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"has_next_page\"]", "liked!\") return likes = media[\"edge_liked_by\"][\"count\"] if likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]:", "likes < self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or too few likes.", ") return await self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type ) else: logger.error(f\"Response", "\"GraphVideo\" else: mediatype = \"GraphImage\" pass likes = media[\"edge_liked_by\"][\"count\"] comments = media[\"edge_media_to_comment\"][\"count\"] shortcode", "end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( { \"id\": str(location_id), \"first\":", "(username,) ) for username in await self.following(): await db.execute( \"UPDATE users SET following=1", "self.save_to_database(id, type, height, width, url, tags) async def download_pic(self, url, id, format): logger.info(f\"Downloading", "users SET follower=0 WHERE username=?\", (username,) ) for username in await self.following(): await", "async def _unfollow(self, id): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"],", "token. It is needed to log in self.csrf_token = await self._getCsrfTokenFromForm() async def", "res = await self.http_request( \"GET\", \"https://www.instagram.com/accounts/login/\" ) return regex.findall( r\"\\\"csrf_token\\\":\\\"(.*?)\\\"\", res, flags=regex.MULTILINE )[0]", "return await self.http_request( method, url, headers=headers, params=params, data=data, response_type=response_type ) else: logger.error(f\"Response status:", "get_user_by_username(self, username): res = await self.http_request( \"GET\", f\"https://www.instagram.com/{username}/\", params=\"__a:1\" ) return json.loads( regex.findall(", "\"explore\" feed to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media", "self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if mediatype == \"GraphImage\" or", "headers=headers, data=payload ) logger.error(res) else: logger.error(\"Couldn't log in.\") cookies = utils.cookies_dict(self.session.cookie_jar) self.csrf_token =", "self.settings[\"backup\"][\"format\"] == \"csv\": await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name,", "likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments", ")[0], flags=regex.DOTALL )[0][:-1])[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"] # ----------------------------------------------------------------------------- async def download(self, media): id = media[\"id\"] url", "def save_to_database(self, id, type, height, width, url, tags): tags = json.dumps(tags) async with", "< self.settings[\"comment\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"comment\"][\"num_of_likes\"][\"max\"]: return comments = media[\"edge_media_to_comment\"][\"count\"] if comments <", "unfollow(self, id): return async def _unfollow(self, id): headers = { \"DNT\": \"1\", \"Host\":", "return 

    async def setup(self, settings_path="settings.json"):
        logger.info("Loading settings...")
        # Load settings; "#" comments in the file are stripped before
        # parsing the JSON
        with open(settings_path) as f:
            self.settings = json.loads(
                regex.sub(r"#.+$", "", f.read(), flags=regex.MULTILINE)
            )
        # Load comments list for photos
        with open("comments/pic_comments.txt") as f:
            comments = f.readlines()
        self.pic_comments_list = [x.strip() for x in comments]
        # Load comments list for videos
        with open("comments/video_comments.txt") as f:
            comments = f.readlines()
        self.video_comments_list = [x.strip() for x in comments]
        # Initialize the asynchronous http session
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": self.settings["connection"]["user_agent"]
        }
        timeout = aiohttp.ClientTimeout(
            total=self.settings["connection"]["timeout"]
        )
        self.session = aiohttp.ClientSession(headers=headers, timeout=timeout)
        logger.info("Session initialized.")
        # Get the csrf token. It is needed to log in
        self.csrf_token = await self._getCsrfTokenFromForm()

    async def _getCsrfTokenFromForm(self):
        # Get login page and find the csrf token
        res = await self.http_request(
            "GET", "https://www.instagram.com/accounts/login/"
        )
        return regex.findall(
            r"\"csrf_token\":\"(.*?)\"", res, flags=regex.MULTILINE
        )[0]
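
    # A sketch of the settings.json shape this class reads. Only keys that
    # are actually referenced in the code are listed; the file shipped with
    # the project may contain more, and the values below are made-up
    # examples:
    #
    #   {
    #       "user": {"username": "...", "password": "..."},
    #       "connection": {"user_agent": "...", "timeout": 30, "wait_time": 0},
    #       "like": {"rate": 80, "media_type": [...],
    #                "num_of_likes": {"min": 0, "max": 100},
    #                "num_of_comments": {"min": 0, "max": 50}},
    #       "comment": {"rate": 20, "only_once": true, "media_type": [...],
    #                   "num_of_likes": {"min": 0, "max": 100},
    #                   "num_of_comments": {"min": 0, "max": 50}},
    #       "follow": {"rate": 10},
    #       "backup": {"users": true, "likes": true, "comments": true,
    #                  "format": "csv", "every": "..."}
    #   }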

    async def login(self):
        payload = {
            "username": self.settings["user"]["username"],
            "password": self.settings["user"]["password"]
        }
        headers = {
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        res = await self.http_request(
            "POST",
            "https://www.instagram.com/accounts/login/ajax/",
            headers=headers,
            data=payload,
            response_type="json"
        )
        if res["authenticated"]:
            logger.info("Logged in!")
            self.id = res["userId"]
        elif res["message"] == "checkpoint_required":
            logger.info("Checkpoint required.")
            res = await self.http_request(
                "POST",
                f"https://www.instagram.com{res['checkpoint_url']}",
                headers=headers,
                data=payload
            )
            logger.error(res)
        else:
            logger.error("Couldn't log in.")
        # The login response refreshes the csrf token in the cookies
        cookies = utils.cookies_dict(self.session.cookie_jar)
        self.csrf_token = cookies["csrftoken"]
        # Initialize the database
        await self._init_database()

    async def _init_database(self):
        logger.info("Checking database...")
        # Connect to the local database and look for the table names
        async with aiosqlite.connect("./piggy.db") as db:
            logger.debug("Checking table: pics")
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS pics (
                    id INT,
                    height INT,
                    width INT,
                    url TEXT,
                    tags TEXT
                )
                """
            )
            logger.debug("Checking table: users")
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS users (
                    id TEXT,
                    username TEXT,
                    ts_follower INTEGER,
                    ts_following INTEGER,
                    follower BOOL,
                    following BOOL
                )
                """
            )
            logger.debug("Checking table: likes")
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS likes (
                    id INTEGER,
                    ts INTEGER
                )
                """
            )
            logger.debug("Checking table: comments")
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS comments (
                    id INTEGER,
                    ts INTEGER,
                    comment TEXT
                )
                """
            )
            logger.info("Updating followers and following lists.")
            # Reset both flags, then re-mark the accounts returned by the API
            await db.execute("UPDATE users SET follower=0, following=0")
            for username in await self.followers():
                await db.execute(
                    "UPDATE users SET follower=1 WHERE username=?",
                    (username,)
                )
            for username in await self.following():
                await db.execute(
                    "UPDATE users SET following=1 WHERE username=?",
                    (username,)
                )
            await db.commit()
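
    # To peek at what the bot has stored so far, the stock sqlite3 shell
    # (a generic tool, not part of this project) works fine:
    #
    #   $ sqlite3 piggy.db
    #   sqlite> .schema likes
    #   CREATE TABLE likes (id INTEGER, ts INTEGER);
    #   sqlite> SELECT COUNT(*) FROM likes;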

    async def followers(self, username=None):
        followers = []
        if username is None:
            id = self.id
        else:
            user = await self.get_user_by_username(username)
            id = user["id"]
        params = {
            "query_hash": "37479f2b8209594dde7facb0d904896a",
            "variables": json.dumps({"id": str(id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_followed_by"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_followed_by"]["page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"id": str(id), "first": 50, "after": end_cursor}
            )
            for user in res["data"]["user"]["edge_followed_by"]["edges"]:
                followers.append(user["node"]["username"])
        return followers

    async def following(self, username=None):
        following = []
        if username is None:
            id = self.id
        else:
            user = await self.get_user_by_username(username)
            id = user["id"]
        params = {
            "query_hash": "58712303d941c6855d4e888c5f0cd22f",
            "variables": json.dumps({"id": str(id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_follow"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_follow"]["page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"id": str(id), "first": 50, "after": end_cursor}
            )
            for user in res["data"]["user"]["edge_follow"]["edges"]:
                following.append(user["node"]["username"])
        return following
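
    # All the GraphQL endpoints used here share the same cursor-based
    # pagination pattern: request a page, read page_info.has_next_page and
    # page_info.end_cursor, then re-issue the query with "after" set to the
    # cursor. A stripped-down sketch of that loop (names are illustrative):
    #
    #   variables = {"id": "123", "first": 50}
    #   while has_next_page:
    #       page = await fetch(query_hash, variables)
    #       has_next_page = page["page_info"]["has_next_page"]
    #       variables["after"] = page["page_info"]["end_cursor"]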

    async def feed(self, explore=True, users=[], hashtags=[], locations=[]):
        """
        Generates a feed based on the passed parameters. Multiple parameters
        can be passed at the same time.

        Args:
            explore: [Bool] If True the explore page will be added to
                the feed.
            users: [List of usernames] Their media will be pulled and added
                to the feed.
            hashtags: [List of hashtags] Media with those hashtags will be
                added to the feed.
            locations: [List of location ids] Media with those locations
                will be added to the feed.

        Returns:
            Yields a media from the feed.
        """
        # Queue where the incoming media are temporarily stored
        q = asyncio.Queue()
        if explore:
            # Add the "explore" feed to the queue
            asyncio.ensure_future(self._explore_feed(q))
        if len(users):
            # Add all the media from the given users to the queue
            for user in users:
                asyncio.ensure_future(self._user_feed(q, user))
        if len(hashtags):
            # Add all the media from the given hashtags to the queue
            for hashtag in hashtags:
                asyncio.ensure_future(self._hashtag_feed(q, hashtag))
        if len(locations):
            # Add all the media from the given locations to the queue
            for location in locations:
                asyncio.ensure_future(self._location_feed(q, location))
        # Keep on yielding media while more is loaded
        while True:
            while not q.empty():
                yield await q.get()
            await asyncio.sleep(1e-12)
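
    # Usage sketch: feed() is an async generator, so a consumer drives the
    # whole pipeline with "async for". Inside a coroutine, after setup() and
    # login(), a session could look like this (the hashtag is illustrative):
    #
    #   async for media in piggy.feed(hashtags=["sunset"]):
    #       await piggy.print(media)
    #       await piggy.like(media)
    #       await piggy.comment(media)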

    async def _explore_feed(self, q):
        params = {
            "query_hash": "ecd67af449fb6edab7c69a205413bfa7",
            "variables": json.dumps({"first": 24})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_web_discover_media"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_web_discover_media"]["page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"first": 50, "after": end_cursor}
            )
            for media in res["data"]["user"]["edge_web_discover_media"]["edges"]:
                await q.put(media["node"])

    async def _user_feed(self, q, user):
        user = await self.get_user_by_username(user)
        id = user["id"]
        params = {
            "query_hash": "a5164aed103f24b03e7b7747a2d94e3c",
            "variables": json.dumps({"id": id, "first": 24})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"id": id, "first": 50, "after": end_cursor}
            )
            for media in res["data"]["user"]["edge_owner_to_timeline_media"]["edges"]:
                await q.put(media["node"])

    async def _hashtag_feed(self, q, hashtag):
        count = 0
        params = {
            "query_hash": "1780c1b186e2c37de9f7da95ce41bb67",
            "variables": json.dumps({"tag_name": hashtag, "first": count})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["end_cursor"]
            # "first" grows with each page request
            count += 1
            params["variables"] = json.dumps(
                {"tag_name": hashtag, "first": count, "after": end_cursor}
            )
            for media in res["data"]["hashtag"]["edge_hashtag_to_media"]["edges"]:
                await q.put(media["node"])

    async def _location_feed(self, q, location_id):
        count = 0
        params = {
            "query_hash": "1b84447a4d8b6d6d0426fefb34514485",
            "variables": json.dumps({"id": str(location_id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET",
                "https://www.instagram.com/graphql/query/",
                params=params,
                response_type="json"
            )
            has_next_page = res["data"]["location"]["edge_location_to_media"]["page_info"]["has_next_page"]
            end_cursor = res["data"]["location"]["edge_location_to_media"]["page_info"]["end_cursor"]
            count += 1
            params["variables"] = json.dumps(
                {
                    "id": str(location_id),
                    "first": 50,
                    "after": str(end_cursor)
                }
            )
            for media in res["data"]["location"]["edge_location_to_media"]["edges"]:
                await q.put(media["node"])

    async def print(self, media):
        """
        Gives a visual representation of a media.

        Args:
            media: The media to be printed.

        Returns:
            None
        """
        logger.info("#--------"*3 + "#")
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        likes = media["edge_liked_by"]["count"]
        comments = media["edge_media_to_comment"]["count"]
        shortcode = media["shortcode"]
        res = await self.http_request(
            "GET",
            f"https://www.instagram.com/p/{shortcode}/",
            params="__a=1",
            response_type="json"
        )
        username = res["graphql"]["shortcode_media"]["owner"]["username"]
        logger.info(
            f"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} "
            f"by {username}\n❤️ {likes}, 💬 {comments}"
        )
        try:
            caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
        except IndexError:
            pass
        else:
            if len(caption) > 100:
                logger.info(f"{caption:.100}...")
            else:
                logger.info(f"{caption}")

    async def like(self, media):
        """
        Check if the media satisfies the prerequisites and, if so, send
        a like.

        Args:
            media: The media to like.

        Returns:
            None
        """
        # Check if the media has already been liked
        async with aiosqlite.connect("./piggy.db") as db:
            row = await db.execute(
                "SELECT * FROM likes WHERE id=?",
                (media["id"],)
            )
            if await row.fetchone():
                logger.info("Already liked!")
                return
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        else:
            if mediatype not in utils.translate_custom_media_type_to_ig(
                    self.settings["like"]["media_type"]):
                logger.info("Wrong media type. Not liked!")
                return
        likes = media["edge_liked_by"]["count"]
        if likes < self.settings["like"]["num_of_likes"]["min"] \
                or likes >= self.settings["like"]["num_of_likes"]["max"]:
            logger.info("Too many or too few likes. Not liked!")
            return
        comments = media["edge_media_to_comment"]["count"]
        if comments < self.settings["like"]["num_of_comments"]["min"] \
                or comments >= self.settings["like"]["num_of_comments"]["max"]:
            logger.info("Too many or too few comments. Not liked!")
            return
        if self.settings["like"]["rate"] / 100 > random():
            await self._like(media["id"])
        else:
            logger.info("Not liked!")
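
    # The "rate" settings are percentages: rate/100 is compared against
    # random() (uniform in [0, 1)), so e.g. "rate": 75 actions roughly 3 out
    # of 4 eligible media. Quick illustrative check:
    #
    #   >>> from random import random
    #   >>> sum(75 / 100 > random() for _ in range(10_000))  # ~7500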

    async def _like(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST",
            f"https://www.instagram.com/web/likes/{id}/like/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "INSERT INTO likes VALUES(?,?)",
                (id, int(time.time()))
            )
            await db.commit()
        logger.info("Liked!")

    async def _unlike(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST",
            f"https://www.instagram.com/web/likes/{id}/unlike/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            # Drop the like from the local database as well
            await db.execute("DELETE FROM likes WHERE id=?", (id,))
            await db.commit()
        logger.info("Unliked!")

    async def comment(self, media):
        """
        Check if the media satisfies the prerequisites and, if so, send
        a comment.

        Args:
            media: The media to comment.

        Returns:
            None
        """
        if media["comments_disabled"]:
            logger.info("Comments disabled.")
            return
        if self.settings["comment"]["only_once"]:
            async with aiosqlite.connect("./piggy.db") as db:
                row = await db.execute(
                    "SELECT * FROM comments WHERE id=?",
                    (media["id"],)
                )
                if await row.fetchone() is not None:
                    logger.info("Already commented.")
                    return
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        else:
            if mediatype not in utils.translate_custom_media_type_to_ig(
                    self.settings["comment"]["media_type"]):
                return
        likes = media["edge_liked_by"]["count"]
        if likes < self.settings["comment"]["num_of_likes"]["min"] \
                or likes >= self.settings["comment"]["num_of_likes"]["max"]:
            return
        comments = media["edge_media_to_comment"]["count"]
        if comments < self.settings["comment"]["num_of_comments"]["min"] \
                or comments >= self.settings["comment"]["num_of_comments"]["max"]:
            return
        if self.settings["comment"]["rate"] / 100 > random():
            if mediatype == "GraphImage" or mediatype == "GraphSidecar":
                comment = self.pic_comments_list[
                    randint(0, len(self.pic_comments_list) - 1)
                ]
            else:
                comment = self.video_comments_list[
                    randint(0, len(self.video_comments_list) - 1)
                ]
            await self._comment(media["id"], comment)
        else:
            logger.info("Not commented!")
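
    # Design note: indexing with randint(0, len(lst) - 1) picks a uniformly
    # random entry; random.choice does the same thing in one call and would
    # be the more idiomatic spelling:
    #
    #   from random import choice
    #   comment = choice(self.pic_comments_list)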

    async def _comment(self, id, comment, reply_to_id=None):
        # reply_to_id is accepted but not used yet
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        payload = {
            "comment_text": comment
        }
        await self.http_request(
            "POST",
            f"https://www.instagram.com/web/comments/{id}/add/",
            headers=headers,
            data=payload
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "INSERT INTO comments VALUES(?,?,?)",
                (id, int(time.time()), comment)
            )
            await db.commit()
        logger.info("Comment posted!")

    async def follow(self, media):
        """
        Check if the media satisfies the prerequisites and, if so, send
        a follow request.

        Args:
            media: The media of the user to be followed.

        Returns:
            None
        """
        if self.settings["follow"]["rate"] / 100 > random():
            await self._follow(media["owner"]["id"])
        else:
            logger.info("Not followed!")

    async def _follow(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST",
            f"https://www.instagram.com/web/friendships/{id}/follow/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            c = await db.execute("SELECT * FROM users WHERE id=?", (id,))
            if await c.fetchone():
                await db.execute(
                    """
                    UPDATE users
                    SET ts_following=?, following=?
                    WHERE id=?
                    """,
                    (int(time.time()), True, id)
                )
            else:
                await db.execute(
                    "INSERT INTO users VALUES(?,?,?,?,?,?)",
                    (id, None, None, int(time.time()), False, True)
                )
            await db.commit()
        logger.info("Follow request sent!")

    async def unfollow(self, id):
        # Placeholder: currently a no-op, _unfollow is never reached
        return

    async def _unfollow(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST",
            f"https://www.instagram.com/web/friendships/{id}/unfollow/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "UPDATE users SET following=false WHERE id=?",
                (id,)
            )
            await db.commit()

    async def backup(self):
        logger.info("Backing up database...")
        for table_name in ["users", "likes", "comments"]:
            if self.settings["backup"][table_name]:
                async with aiosqlite.connect("./piggy.db") as db:
                    rows = await db.execute(f"SELECT * FROM {table_name}")
                    header = [i[0] for i in rows.description]
                    rows = await rows.fetchall()
                if self.settings["backup"]["format"] == "csv":
                    await utils.to_csv(table_name, header, rows)
                elif self.settings["backup"]["format"] == "json":
                    await utils.to_json(table_name, header, rows)
                else:
                    logger.warning(
                        f"""Unsupported file format: {self.settings['backup']['format']}."""
                    )
        await asyncio.sleep(
            utils.interval_in_seconds(self.settings["backup"]["every"])
        )
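
    # backup() makes a single pass over the tables and then sleeps for the
    # configured interval, so a plausible (hypothetical) way to schedule it
    # is to loop it as its own task:
    #
    #   async def run_backup_forever():
    #       while True:
    #           await piggy.backup()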
res[\"data\"][\"user\"][\"edge_owner_to_timeline_media\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": id, \"first\": 50, \"after\": end_cursor} ) for", "async def download_pic(self, url, id, format): logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session:", "from the given locations to the queue for location in locations: asyncio.ensure_future(self._location_feed(q, location))", "The media of the user to be followed. Retruns: None \"\"\" if self.settings[\"follow\"][\"rate\"]", "return comments = media[\"edge_media_to_comment\"][\"count\"] if comments < self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return", "async def followers(self, username=None): followers = [] if username is None: id =", "Add the \"explore\" feed to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all", "\"X-CSRFToken\": self.csrf_token } await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as", "\"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id = res[\"userId\"] elif", "r.text() logger.debug(res) return res elif response_type == \"json\": res = await r.json() logger.debug(res)", "users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all the media from the given", "async def follow(self, media): \"\"\" Check if the media satisfy the prerequisites and", "not q.empty(): yield await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params =", "params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in", "to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media from the", "\"\"\" Check if the media satisfy the prerequisites and eventually it will send", "res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\",", "comments = media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\",", "< self.settings[\"like\"][\"num_of_likes\"][\"min\"] or likes >= self.settings[\"like\"][\"num_of_likes\"][\"max\"]: logger.info(\"Too many or too few likes. Not", "users\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS users ( id TEXT,", "async def download(self, media): id = media[\"id\"] url = media[\"display_url\"] format = regex.findall(r\".([a-zA-Z]+)$\",", "\"User-Agent\": self.settings[\"connection\"][\"user_agent\"] } timeout = aiohttp.ClientTimeout( total=self.settings[\"connection\"][\"timeout\"] ) self.session = aiohttp.ClientSession(headers=headers, timeout=timeout) logger.info(\"Session", "logger.info(\"Logged in!\") self.id = res[\"userId\"] elif res[\"message\"] == \"checkpoint_required\": logger.info(\"Checkpoint required.\") res =", "yield await q.get() await asyncio.sleep(1e-12) async def _explore_feed(self, q): params = { \"query_hash\":", "{ \"query_hash\": \"37479f2b8209594dde7facb0d904896a\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while", "UPDATE users SET ts_following=?, following=? WHERE id=? 
\"\"\", (int(time.time()), True, id) ) else:", "id=? \"\"\", (int(time.time()), True, id) ) else: await db.execute( \"INSERT INTO users VALUES(?,?,?,?,?)\",", "{ \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token } payload = {", "FROM users WHERE id=?\", (id,)) if c.rowcount: await db.execute( \"\"\" UPDATE users SET", "row = await db.execute( \"SELECT * FROM comments WHERE id=?\", (media[\"id\"],) ) if", "the asynchronous http session headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\",", "_user_feed(self, q, user): user = await self.get_user_by_usernameUsername(user) id = user[\"id\"] params = {", "with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"UPDATE users SET following=false WHERE id=?\", (id,)", "Generates a feed based on the passed parameters. Multiple parameters can be passed", "tags TEXT ) \"\"\" ) logger.debug(\"Checking table: users\") await db.execute( \"\"\" CREATE TABLE", "json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"])", "list for photos with open(\"comments/pic_comments.txt\") as f: comments = f.readlines() self.pic_comments_list = [x.strip()", "if explore: # Add the \"explore\" feed to the queue asyncio.ensure_future(self._explore_feed(q)) if len(users):", "Media with those hashtags will be added to the feed. locations: [List of", "None: return False else: return True async def save_to_database(self, id, type, height, width,", "json.loads( regex.sub(r\"#.+$\", \"\", f.read(), flags=regex.MULTILINE) ) # Load comments list for photos with", "feed. users: [List of usernames] Their media will be pulled and added to", "aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time())) ) await", "print(self, media): \"\"\" Gives a visual representation of a media. 
    async def get_user_by_username(self, username):
        res = await self.http_request(
            "GET", f"https://www.instagram.com/{username}/", params="__a=1"
        )
        # The profile data is embedded in the page as window._sharedData.
        return json.loads(
            regex.findall(
                r"<script[^>]*>window._sharedData = (.*?)</script>",
                regex.findall(
                    r"<body[^>]*>(.*)</body>", res, flags=regex.DOTALL
                )[0],
                flags=regex.DOTALL
            )[0][:-1]
        )["entry_data"]["ProfilePage"][0]["graphql"]["user"]

    # -------------------------------------------------------------------------
    async def followers(self, username=None):
        followers = []
        if username is None:
            id = self.id
        else:
            user = await self.get_user_by_username(username)
            id = user["id"]
        params = {
            "query_hash": "37479f2b8209594dde7facb0d904896a",
            "variables": json.dumps({"id": str(id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET", "https://www.instagram.com/graphql/query/",
                params=params, response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_followed_by"][
                "page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_followed_by"][
                "page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"id": str(id), "first": 50, "after": end_cursor}
            )
            for user in res["data"]["user"]["edge_followed_by"]["edges"]:
                followers.append(user["node"]["username"])
        return followers
    async def following(self, username=None):
        following = []
        if username is None:
            id = self.id
        else:
            user = await self.get_user_by_username(username)
            id = user["id"]
        params = {
            "query_hash": "58712303d941c6855d4e888c5f0cd22f",
            "variables": json.dumps({"id": str(id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET", "https://www.instagram.com/graphql/query/",
                params=params, response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_follow"][
                "page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_follow"][
                "page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"id": str(id), "first": 50, "after": end_cursor}
            )
            for user in res["data"]["user"]["edge_follow"]["edges"]:
                following.append(user["node"]["username"])
        return following
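    # Both helpers above walk the paginated GraphQL endpoint and return plain
    # lists of usernames. A hypothetical example of combining them:
    #
    #   followers = set(await piggy.followers())
    #   following = set(await piggy.following())
    #   not_following_back = following - followers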
    async def feed(self, explore=True, users=[], hashtags=[], locations=[]):
        """
        Generates a feed based on the passed parameters. Multiple parameters
        can be passed at the same time.

        Args:
            explore: [Bool] If True the explore page media will be added
                to the feed.
            users: [List of usernames] Their media will be pulled and added
                to the feed.
            hashtags: [List of hashtags] Media with those hashtags will be
                added to the feed.
            locations: [List of location ids] Media from those locations
                will be added to the feed.

        Returns:
            Yields a media from the generated feed.
        """
        # Initialize the asynchronous queue where the feed elements will be
        # temporarily stored.
        q = asyncio.Queue()
        if explore:
            # Add the "explore" feed to the queue.
            asyncio.ensure_future(self._explore_feed(q))
        if len(users):
            # Add all the media from the given users to the queue.
            for user in users:
                asyncio.ensure_future(self._user_feed(q, user))
        if len(hashtags):
            # Add all the media from the given hashtags to the queue.
            for hashtag in hashtags:
                asyncio.ensure_future(self._hashtag_feed(q, hashtag))
        if len(locations):
            # Add all the media from the given locations to the queue.
            for location in locations:
                asyncio.ensure_future(self._location_feed(q, location))
        # Keep on yielding media while more is loaded.
        while 1:
            while not q.empty():
                yield await q.get()
            await asyncio.sleep(1e-12)
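    # feed() is an async generator, so the action methods defined below can be
    # chained on every yielded media. A hypothetical consumption loop:
    #
    #   async for media in piggy.feed(hashtags=["sunset"], explore=False):
    #       await piggy.print(media)
    #       await piggy.like(media)
    #       await piggy.comment(media)
    #       await piggy.follow(media)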
    async def _explore_feed(self, q):
        params = {
            "query_hash": "ecd67af449fb6edab7c69a205413bfa7",
            "variables": json.dumps({"first": 24})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET", "https://www.instagram.com/graphql/query/",
                params=params, response_type="json"
            )
            has_next_page = res["data"]["user"]["edge_web_discover_media"][
                "page_info"]["has_next_page"]
            end_cursor = res["data"]["user"]["edge_web_discover_media"][
                "page_info"]["end_cursor"]
            params["variables"] = json.dumps(
                {"first": 50, "after": end_cursor}
            )
            for media in res["data"]["user"]["edge_web_discover_media"]["edges"]:
                await q.put(media["node"])
media[\"edge_media_to_comment\"][\"count\"] shortcode = media[\"shortcode\"] res = await self.http_request(", "be added to the feed. Retruns: Yields a media from the generated feed.", "await db.execute(\"UPDATE users SET follower=0, following=1\") for username in await self.followers(): await db.execute(", "regex from aiohttp.client_exceptions import ClientConnectorError from piggy import utils # Logging logger =", "id=?\", (id,)) await db.commit() logger.info(\"Unliked!\") async def comment(self, media): \"\"\" Check if the", "increase retry time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many requests! Retrying in {self.settings['connection']['wait_time']}", "backup(self): while 1: logger.info(\"Backing up database...\") for table_name in [\"users\", \"likes\", \"comments\"]: if", "SET follower=0 WHERE username=?\", (username,) ) for username in await self.following(): await db.execute(", "{method}\") except ClientConnectorError: logger.error(\"Could not reach the server. Retrying in 30 seconds.\") await", "the queue asyncio.ensure_future(self._explore_feed(q)) if len(users): # Add all the media from the given", "prerequisites and eventually send a follow request. Args: media: The media of the", "{ \"query_hash\": \"ecd67af449fb6edab7c69a205413bfa7\", \"variables\": json.dumps({\"first\": 24}) } has_next_page = True while has_next_page: res", "hashtag, \"first\": count, \"after\": end_cursor} ) for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async", "self.settings[\"comment\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if", "{ \"query_hash\": \"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) } has_next_page = True while", "FROM comments WHERE id=?\", (media[\"id\"],) ) if await row.fetchone() is None: logger.info(\"Already commented.\")", "# Logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = logging.FileHandler(\"./piggy.log\") ch.setLevel(logging.INFO)", "for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async def feed(self, explore=True, users=[], hashtags=[],", "= asyncio.Queue() if explore: # Add the \"explore\" feed to the queue asyncio.ensure_future(self._explore_feed(q))", "* FROM likes WHERE id=?\", (media[\"id\"],) ) if await row.fetchone(): logger.info(\"Already liked!\") return", "await self.http_request( \"GET\", \"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"]", "database.\") async with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT * FROM", "await self.http_request( \"POST\", \"https://www.instagram.com/accounts/login/ajax/\", headers=headers, data=payload, response_type=\"json\" ) if res[\"authenticated\"]: logger.info(\"Logged in!\") self.id", "r.json() logger.debug(res) return res else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status ==", "logger.info(\"Checking database...\") # Connect to the local database and look for the table", 
"res[\"data\"][\"user\"][\"edge_web_discover_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _user_feed(self, q, user): user = await self.get_user_by_usernameUsername(user) id", "await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/like/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute(", "the feed. Retruns: Yields a media from the generated feed. \"\"\" # Initialize", "\"https://www.instagram.com/graphql/query/\", params=params, response_type=\"json\" ) has_next_page = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"user\"][\"edge_follow\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps(", "else: if len(caption) > 100: logger.info(f\"{caption:.100}...\") else: logger.info(f\"{caption}\") async def like(self, media): \"\"\"", "rows) else: logger.warning( f\"\"\"Unsupported file format: {self.settings['backup']['format']}.\"\"\" ) await asyncio.sleep( utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async", "async def http_request( self, method, url, headers=None, params=None, data=None, response_type=\"text\" ): await asyncio.sleep(self.settings['connection'][\"wait_time\"])", "formatter = logging.Formatter( \"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) class Piggy:", "format): logger.info(f\"Downloading {id}\") async with aiohttp.ClientSession() as session: try: async with session.get(url) as", "None \"\"\" if self.settings[\"follow\"][\"rate\"] / 100 > random(): await self._follow(media[\"owner\"][\"id\"]) else: logger.info(\"Not followed!\")", "%(message)s\" ) fh.setFormatter(formatter) logger.addHandler(ch) logger.addHandler(fh) class Piggy: def __init__(self, loop): self.loop = loop", "self.settings[\"like\"][\"num_of_comments\"][\"min\"] or comments >= self.settings[\"like\"][\"num_of_comments\"][\"max\"]: logger.info(\"Too many or too few comments. 
    async def _location_feed(self, q, location_id):
        count = 0
        params = {
            "query_hash": "1b84447a4d8b6d6d0426fefb34514485",
            "variables": json.dumps({"id": str(location_id), "first": 50})
        }
        has_next_page = True
        while has_next_page:
            res = await self.http_request(
                "GET", "https://www.instagram.com/graphql/query/",
                params=params, response_type="json"
            )
            has_next_page = res["data"]["location"]["edge_location_to_media"][
                "page_info"]["has_next_page"]
            end_cursor = res["data"]["location"]["edge_location_to_media"][
                "page_info"]["end_cursor"]
            count += 1
            params["variables"] = json.dumps(
                {"id": str(location_id), "first": 50, "after": str(end_cursor)}
            )
            for media in res["data"]["location"][
                    "edge_location_to_media"]["edges"]:
                await q.put(media["node"])
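    # All four _*_feed workers share one skeleton: query the GraphQL endpoint,
    # push every media node onto the shared queue, then page forward with
    # "end_cursor" until "has_next_page" turns false. A compact sketch of that
    # shared skeleton (hypothetical helper, not present in the original code):
    #
    #   async def _paginated_feed(self, q, params, *path):
    #       has_next_page = True
    #       while has_next_page:
    #           res = await self.http_request(
    #               "GET", "https://www.instagram.com/graphql/query/",
    #               params=params, response_type="json"
    #           )
    #           edge = res["data"]
    #           for key in path:   # e.g. ("hashtag", "edge_hashtag_to_media")
    #               edge = edge[key]
    #           has_next_page = edge["page_info"]["has_next_page"]
    #           variables = json.loads(params["variables"])
    #           variables["after"] = edge["page_info"]["end_cursor"]
    #           params["variables"] = json.dumps(variables)
    #           for media in edge["edges"]:
    #               await q.put(media["node"])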
    async def print(self, media):
        """
        Gives a visual representation of a media.

        Args:
            media: The media to be printed.

        Returns:
            None
        """
        logger.info("#--------" * 3 + "#")
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        likes = media["edge_liked_by"]["count"]
        comments = media["edge_media_to_comment"]["count"]
        shortcode = media["shortcode"]
        res = await self.http_request(
            "GET", f"https://www.instagram.com/p/{shortcode}/",
            params="__a=1", response_type="json"
        )
        username = res["graphql"]["shortcode_media"]["owner"]["username"]
        logger.info(
            f"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()}"
            f" by {username}\n❤️ {likes}, 💬 {comments}"
        )
        try:
            caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
        except IndexError:
            pass
        else:
            if len(caption) > 100:
                logger.info(f"{caption:.100}...")
            else:
                logger.info(f"{caption}")
    async def like(self, media):
        """
        Check whether the media satisfies the prerequisites and, if it does,
        send a like.

        Args:
            media: The media to like.

        Returns:
            None
        """
        # Skip media that have already been liked.
        async with aiosqlite.connect("./piggy.db") as db:
            row = await db.execute(
                "SELECT * FROM likes WHERE id=?", (media["id"],)
            )
            if await row.fetchone():
                logger.info("Already liked!")
                return
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        else:
            if mediatype not in utils.translate_custom_media_type_to_ig(
                    self.settings["like"]["media_type"]):
                logger.info("Wrong media type. Not liked!")
                return
        likes = media["edge_liked_by"]["count"]
        if likes < self.settings["like"]["num_of_likes"]["min"] \
                or likes >= self.settings["like"]["num_of_likes"]["max"]:
            logger.info("Too many or too few likes. Not liked!")
            return
        comments = media["edge_media_to_comment"]["count"]
        if comments < self.settings["like"]["num_of_comments"]["min"] \
                or comments >= self.settings["like"]["num_of_comments"]["max"]:
            logger.info("Too many or too few comments. Not liked!")
            return
        if self.settings["like"]["rate"] / 100 > random():
            await self._like(media["id"])
        else:
            logger.info("Not liked!")
    async def _like(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST", f"https://www.instagram.com/web/likes/{id}/like/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "INSERT INTO likes VALUES(?,?)", (id, int(time.time()))
            )
            await db.commit()

    async def _unlike(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST", f"https://www.instagram.com/web/likes/{id}/unlike/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute("DELETE FROM likes WHERE id=?", (id,))
            await db.commit()
        logger.info("Unliked!")
    async def comment(self, media):
        """
        Check whether the media satisfies the prerequisites and, if it does,
        post a comment.

        Args:
            media: The media to comment.

        Returns:
            None
        """
        if media["comments_disabled"]:
            logger.info("Comments disabled.")
            return
        if self.settings["comment"]["only_once"]:
            # Skip media this account has already commented on.
            async with aiosqlite.connect("./piggy.db") as db:
                row = await db.execute(
                    "SELECT * FROM comments WHERE id=?", (media["id"],)
                )
                if await row.fetchone() is not None:
                    logger.info("Already commented.")
                    return
        try:
            mediatype = media["__typename"]
        except KeyError:
            is_video = media["is_video"]
            if is_video:
                mediatype = "GraphVideo"
            else:
                mediatype = "GraphImage"
        else:
            if mediatype not in utils.translate_custom_media_type_to_ig(
                    self.settings["comment"]["media_type"]):
                return
        likes = media["edge_liked_by"]["count"]
        if likes < self.settings["comment"]["num_of_likes"]["min"] \
                or likes >= self.settings["comment"]["num_of_likes"]["max"]:
            return
        comments = media["edge_media_to_comment"]["count"]
        if comments < self.settings["comment"]["num_of_comments"]["min"] \
                or comments >= self.settings["comment"]["num_of_comments"]["max"]:
            return
        if self.settings["comment"]["rate"] / 100 > random():
            if mediatype == "GraphImage" or mediatype == "GraphSidecar":
                comment = self.pic_comments_list[
                    randint(0, len(self.pic_comments_list) - 1)
                ]
            else:
                comment = self.video_comments_list[
                    randint(0, len(self.video_comments_list) - 1)
                ]
            await self._comment(media["id"], comment)
        else:
            logger.info("Not commented!")
    async def _comment(self, id, comment, reply_to_id=None):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        payload = {
            "comment_text": comment
        }
        await self.http_request(
            "POST", f"https://www.instagram.com/web/comments/{id}/add/",
            headers=headers, data=payload
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "INSERT INTO comments VALUES(?,?,?)",
                (id, int(time.time()), comment)
            )
            await db.commit()
        logger.info("Comment posted!")
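    # The "rate" values in settings.json are percentages: an action fires when
    # rate / 100 > random(). Since random() is uniform on [0, 1), "rate": 70
    # means roughly 70% of the eligible media get the action, e.g.:
    #
    #   if self.settings["like"]["rate"] / 100 > random():
    #       await self._like(media["id"])   # taken ~70% of the time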
    async def follow(self, media):
        """
        Check whether the media satisfies the prerequisites and, if it does,
        send a follow request.

        Args:
            media: The media of the user to be followed.

        Returns:
            None
        """
        if self.settings["follow"]["rate"] / 100 > random():
            await self._follow(media["owner"]["id"])
        else:
            logger.info("Not followed!")

    async def _follow(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST", f"https://www.instagram.com/web/friendships/{id}/follow/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            c = await db.execute("SELECT * FROM users WHERE id=?", (id,))
            if await c.fetchone():
                await db.execute(
                    """
                    UPDATE users SET ts_following=?, following=?
                    WHERE id=?
                    """,
                    (int(time.time()), True, id)
                )
            else:
                # Columns: id, username, ts_follower, ts_following,
                # follower, following.
                await db.execute(
                    "INSERT INTO users VALUES(?,?,?,?,?,?)",
                    (id, None, None, int(time.time()), False, True)
                )
            await db.commit()
        logger.info("Follow request sent!")
    async def unfollow(self, id):
        # Unfollowing is currently disabled; _unfollow below is kept for
        # when it is switched back on.
        return

    async def _unfollow(self, id):
        headers = {
            "DNT": "1",
            "Host": "www.instagram.com",
            "User-Agent": self.settings["connection"]["user_agent"],
            "X-CSRFToken": self.csrf_token
        }
        await self.http_request(
            "POST", f"https://www.instagram.com/web/friendships/{id}/unfollow/",
            headers=headers
        )
        async with aiosqlite.connect("./piggy.db") as db:
            await db.execute(
                "UPDATE users SET following=0 WHERE id=?", (id,)
            )
            await db.commit()
media[\"is_video\"] if is_video: mediatype = \"GraphVideo\"", "{ \"query_hash\": \"58712303d941c6855d4e888c5f0cd22f\", \"variables\": json.dumps({\"id\": str(id), \"first\": 50}) } has_next_page = True while", "} await self.http_request( \"POST\", f\"https://www.instagram.com/web/likes/{id}/unlike/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await", "await self.download_pic(url, id, format): logger.info(f\"Caption: {caption}\") tags = regex.findall(r\"#([\\p{L}0-9_]+)\", caption) logger.info(f\"Tags: {tags}\") else:", "WHERE id=?\", (id,) ) await db.commit() async def backup(self): while 1: logger.info(\"Backing up", "for media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q, location_id): count =", "429: # Unsuccessfull request: increase retry time self.settings['connection'][\"wait_time\"] += 1 logger.warning( f\"\"\"Too many", "= await self.http_request( \"GET\", f\"https://www.instagram.com/p/{shortcode}/\", params=\"__a=1\", response_type=\"json\" ) username = res[\"graphql\"][\"shortcode_media\"][\"owner\"][\"username\"] logger.info( f\"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()}", "comment, reply_to_id=None): headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"User-Agent\": self.settings[\"connection\"][\"user_agent\"], \"X-CSRFToken\": self.csrf_token", "res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"location\"][\"edge_location_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps( { \"id\": str(location_id),", "hashtag)) if len(locations): # Add all the media from the given locations to", "asynchronous http session headers = { \"DNT\": \"1\", \"Host\": \"www.instagram.com\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\":", "Retruns: None \"\"\" # Check if the media has already been liked async", "followers = [] if username is None: id = self.id else: user =", "\"\"\" CREATE TABLE IF NOT EXISTS likes ( id INTEGER, ts INTEGER )", "username=None): following = [] if username is None: id = self.id else: user", ") has_next_page = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"has_next_page\"] end_cursor = res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"page_info\"][\"end_cursor\"] count += 1 params[\"variables\"] = json.dumps(", "method: {method}\") except ClientConnectorError: logger.error(\"Could not reach the server. Retrying in 30 seconds.\")", "await utils.to_csv(table_name, header, rows) elif self.settings[\"backup\"][\"format\"] == \"json\": await utils.to_json(table_name, header, rows) else:", "as db: logger.debug(\"Checking table: pics\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS", "res[\"data\"][\"user\"][\"edge_followed_by\"][\"page_info\"][\"end_cursor\"] params[\"variables\"] = json.dumps( {\"id\": str(id), \"first\": 50, \"after\": end_cursor} ) for user", "\"first\": 50, \"after\": end_cursor} ) for user in res[\"data\"][\"user\"][\"edge_follow\"][\"edges\"]: following.append(user[\"node\"][\"username\"]) return following async", "be passed at the same time. Args: explore: [Bool] If True the explore", "with aiosqlite.connect(\"./piggy.db\") as db: row = await db.execute( \"SELECT * FROM comments WHERE", "representation of a media. Args: media: The media to be printed. 
    async def close(self):
        logger.info("\nClosing session...")
        # Close the http session.
        await self.session.close()

    # -------------------------------------------------------------------------

    async def download(self, media):
        id = media["id"]
        url = media["display_url"]
        format = regex.findall(r".([a-zA-Z]+)$", url)[0]
        # Only plain images that are not yet saved get downloaded.
        if media["__typename"] != "GraphImage" \
                or await self.pic_already_saved(id):
            return
        height = media["dimensions"]["height"]
        width = media["dimensions"]["width"]
        if not await self.download_pic(url, id, format):
            return
        try:
            caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
        except IndexError:
            # No caption, hence no tags to extract.
            tags = []
        else:
            logger.info(f"Caption: {caption}")
            tags = regex.findall(r"#([\p{L}0-9_]+)", caption)
            logger.info(f"Tags: {tags}")
        await self.save_to_database(id, height, width, url, tags)
Retruns: None", "WHERE id=?\", (id,)) await db.commit() logger.info(\"Unliked!\") async def comment(self, media): \"\"\" Check if", "headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: c = await db.execute(\"SELECT * FROM", "} await self.http_request( \"POST\", f\"https://www.instagram.com/web/friendships/{id}/unfollow/\", headers=headers ) async with aiosqlite.connect(\"./piggy.db\") as db: await", "(int(time.time()), True, id) ) else: await db.execute( \"INSERT INTO users VALUES(?,?,?,?,?)\", (id, None,", "NOT EXISTS comments ( id INTEGER, ts INTEGER, comment TEXT ) \"\"\" )", "\"1780c1b186e2c37de9f7da95ce41bb67\", \"variables\": json.dumps({\"tag_name\": hashtag, \"first\": count}) } has_next_page = True while has_next_page: res", "q.put(media[\"node\"]) async def _user_feed(self, q, user): user = await self.get_user_by_usernameUsername(user) id = user[\"id\"]", "\"X-CSRFToken\": self.csrf_token } payload = { \"comment_text\": comment } await self.http_request( \"POST\", f\"https://www.instagram.com/web/comments/{id}/add/\",", "following=false WHERE id=?\", (id,) ) await db.commit() async def backup(self): while 1: logger.info(\"Backing", "passed parameters. Multiple parameters can be passed at the same time. Args: explore:", "r.status == 200: f = await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read())", "pulled and added to the feed. hashtags: [List of hastags] Media with those", "to the local database and look for the table names async with aiosqlite.connect(\"./piggy.db\")", "Check if the media satisfy the prerequisites and eventually send a follow request.", "Check if the media has already been liked async with aiosqlite.connect(\"./piggy.db\") as db:", "# Initialize asynchronous queue where the feed elements will be # temporarely stored", "as db: await db.execute( \"INSERT INTO likes VALUES(?,?)\", (id, int(time.time())) ) await db.commit()", "temporarely stored q = asyncio.Queue() if explore: # Add the \"explore\" feed to", "q.put(media[\"node\"]) async def _location_feed(self, q, location_id): count = 0 params = { \"query_hash\":", "json.dumps(tags) async with aiosqlite.connect(\"./piggy.db\") as db: await db.execute( \"INSERT INTO pics VALUES(?,?,?,?,?)\", (id,", "try: caption = media[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] except IndexError: pass else: if len(caption) > 100: logger.info(f\"{caption:.100}...\")", "media in res[\"data\"][\"hashtag\"][\"edge_hashtag_to_media\"][\"edges\"]: await q.put(media[\"node\"]) async def _location_feed(self, q, location_id): count = 0", "user): user = await self.get_user_by_usernameUsername(user) id = user[\"id\"] params = { \"query_hash\": \"a5164aed103f24b03e7b7747a2d94e3c\",", "raise ValueError(f\"Invalid HTTP method: {method}\") except ClientConnectorError: logger.error(\"Could not reach the server. Retrying", "BOOL, following BOOL ) \"\"\" ) logger.debug(\"Checking table: likes\") await db.execute( \"\"\" CREATE", "await r.text() logger.debug(res) return res elif response_type == \"json\": res = await r.json()", "NOT EXISTS likes ( id INTEGER, ts INTEGER ) \"\"\" ) logger.debug(\"Checking table:", "database...\") # Connect to the local database and look for the table names", "the media from the given hashtags to the queue for hashtag in hashtags:", "media to be printed. 
Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"] except", ">= self.settings[\"comment\"][\"num_of_comments\"][\"max\"]: return if self.settings[\"comment\"][\"rate\"] / 100 <= random(): if mediatype == \"GraphImage\"", "random, randint import asyncio import aiohttp import aiosqlite import aiofiles import regex from", "[x.strip() for x in comments] # Load comments list for videos with open(\"comments/video_comments.txt\")", "== \"checkpoint_required\": logger.info(\"Checkpoint required.\") res = await self.http_request( \"POST\", f\"https://www.instagram.com{res['checkpoint_url']}\", headers=headers, data=payload )", "mediatype = \"GraphImage\" pass else: if not mediatype in utils.translate_custom_media_type_to_ig(self.settings[\"comment\"][\"media_type\"]): return likes =", ") # Load comments list for photos with open(\"comments/pic_comments.txt\") as f: comments =", "else: raise ValueError(f\"Invalid response type: {response_type}\") elif r.status == 429: # Unsuccessfull request:", "utils.interval_in_seconds(self.settings[\"backup\"][\"every\"]) ) async def close(self): logger.info(\"\\nClosing session...\") # Close the http session await", "\"first\": 24}) } has_next_page = True while has_next_page: res = await self.http_request( \"GET\",", "the queue for hashtag in hashtags: asyncio.ensure_future(self._hashtag_feed(q, hashtag)) if len(locations): # Add all", "\"\"\" ) logger.debug(\"Checking table: likes\") await db.execute( \"\"\" CREATE TABLE IF NOT EXISTS", "the queue for user in users: asyncio.ensure_future(self._user_feed(q, user)) if len(hashtags): # Add all", "Returns: None \"\"\" logger.info(\"#--------\"*3+\"#\") try: mediatype = media[\"__typename\"] except KeyError: is_video = media[\"is_video\"]", "except KeyError: is_video = media[\"is_video\"] if is_video: mediatype = \"GraphVideo\" else: mediatype =", "f = await aiofiles.open( f\"./images/{id}.{format}\", mode=\"wb\" ) await f.write(await r.read()) await f.close() return" ]
[ "random import tempfile import webbrowser import time import uuid import socket import shutil", "else j['load_name'], i.split('-_-')[0] if not j['name'] else j['name'].title() ) for i, j in", "nohtml_base.replace('{% avaliable_paths %}', '' if not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ',", "template, meta in self.templates.items(): template = template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else", "= ('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection) a_socket.close() return not (not port_open) def __gen_yaplee_temp(self):", "tag_loc, tags = '', {} if 'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']()", "meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source = '' is_tag_has_source = False tag_name", "= template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy", "'index.html'), 'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield", "in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def start(self):", "= self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open", "tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue']", "is str: styles = [meta['meta']['style']] elif type(meta['meta']['style']) is list: styles = meta['meta']['style'] else:", "tag_source = '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']:", "meta['meta']: js_functions = {i.__name__:i for i in meta['meta']['functions']} elif 'style' in meta['meta']: if", "style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'], i.split('-_-')[0] if", "else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy", "elif 'style' in meta['meta']: if type(meta['meta']['style']) is str: styles = [meta['meta']['style']] elif type(meta['meta']['style'])", "yaplee.errors import UnknownTemplateValue from yaplee.js.converter import JSFunc class Server: def __init__(self, meta) ->", "False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href' in str(tag): tag_source", "styles = meta['meta']['style'] else: raise UnknownTemplateValue( 'template style must be list or string", "tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for", "meta) -> None: self.port = meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree'] self.opentab", "'<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name']", "tag_meta, tag in tags.items(): tag_source = '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0])", "import tempfile import webbrowser import time import uuid import socket import shutil import", "('://' not in tag_source and tag_source): shutil.copy( tag_source, 
os.path.join(self.temp_path, tag_source) ) if 'tagvalue'", "uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def", "if type(meta['meta']['style']) is str: styles = [meta['meta']['style']] elif type(meta['meta']['style']) is list: styles =", "= str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href' in str(tag): tag_source = tag.get('href')", "__gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return", "Server: def __init__(self, meta) -> None: self.port = meta['config']['port'] self.templates = meta['templates'] self.tree", "('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection) a_socket.close() return not (not port_open) def __gen_yaplee_temp(self): self.tempuuid", "tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue", "open(template_to_copy, 'r+') as file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname,", "= {i.__name__:i for i in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style']) is", "'function '+funcname+'(){ '+ str(JSFunc(function))+ ' }' ) file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if", ") tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style )", "tagname, tag in tags.items(): soup.find(tag_loc).append(tag) for funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0]", "'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def", "tag_name in ['link']: if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source = True", "(not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not", "time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name == 'posix' else 'python')+' -m", "= os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags = '', {}", "= str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+ ' }'", "', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'],", "in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+", "tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for style", "generate_files(self): generated_files = [] js_functions = {} for template, meta in self.templates.items(): template", "import shutil import subprocess import pathlib from bs4 import BeautifulSoup from yaplee.errors import", "j['load_name'], i.split('-_-')[0] if not j['name'] else j['name'].title() ) for i, j in self.templates.items()])", "else: try: if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source = True 
except:", "tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions =", "} for style in styles: shutil.copy( style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+')", "return self.tempuuid, path def generate_files(self): generated_files = [] js_functions = {} for template,", "os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files =", "generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file:", "target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'], i.split('-_-')[0] if not j['name'] else", "raise UnknownTemplateValue( 'template style must be list or string (one style)' ) tag_loc,", "in ['link']: if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source = True else:", "time import uuid import socket import shutil import subprocess import pathlib from bs4", "self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name == 'posix' else", "open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1)", "to_copy_path = meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path,", "file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if not self.templates", "subprocess import pathlib from bs4 import BeautifulSoup from yaplee.errors import UnknownTemplateValue from yaplee.js.converter", "a_socket.connect_ex(local_connection) a_socket.close() return not (not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path =", "i, j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base)", "= tag.get('href') is_tag_has_source = True else: try: if 'src' in str(tag): tag_source =", "self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "j['name'].title() ) for i, j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+')", "'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid,", "generated_files = [] js_functions = {} for template, meta in self.templates.items(): template =", "'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']:", "__init__(self, meta) -> None: self.port = meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree']", "webbrowser import time import uuid import socket import shutil import subprocess import pathlib", "str(tag): tag_source = tag.get('href') is_tag_has_source = True else: try: if 'src' in str(tag):", "def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = 
os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path)", "if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in", "= meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\',", "meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path =", "none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'], i.split('-_-')[0] if not", "= file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag in tags.items(): soup.find(tag_loc).append(tag) for", "if not j['name'] else j['name'].title() ) for i, j in self.templates.items()]) ) )", "funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function", "import UnknownTemplateValue from yaplee.js.converter import JSFunc class Server: def __init__(self, meta) -> None:", "str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection =", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection) a_socket.close() return not", "style must be list or string (one style)' ) tag_loc, tags = 'head',", "= os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files", "list: styles = meta['meta']['style'] else: raise UnknownTemplateValue( 'template style must be list or", "not (not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if", "tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions = {i.__name__:i for i in meta['meta']['functions']}", "'link', rel='stylesheet', href=style ) for style in styles } for style in styles:", "soup.find(tag_loc).append(tag) for funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id':", "to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags =", "port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path):", "import random import tempfile import webbrowser import time import uuid import socket import", "meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source = ''", "self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0]", "try: if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source = True except: continue", "UnknownTemplateValue from yaplee.js.converter import JSFunc class Server: def __init__(self, meta) -> None: self.port", "shutil.copy( 
template, template_to_copy ) tag_loc, tags = '', {} if 'tags' in meta['meta']:", "with open(template_to_copy, 'r+') as file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for", "'+funcname+'(){ '+ str(JSFunc(function))+ ' }' ) file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html'", "elif 'functions' in meta['meta']: js_functions = {i.__name__:i for i in meta['meta']['functions']} elif 'style'", "in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style']) is str: styles = [meta['meta']['style']]", "tags.items(): tag_source = '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in", "http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def remove_yaplee_dir(self): if os.path.isdir(os.path.join(tempfile.gettempdir(), self.tempuuid)):", "if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source = True except: continue if", "style)' ) tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style", "template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy =", "= '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket =", "tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions = {i.__name__:i for i", "tempfile import webbrowser import time import uuid import socket import shutil import subprocess", "os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags = '', {} if", "as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path", "not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: nohtml_base = file.read()", "a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection) a_socket.close() return", "tag.get('href') is_tag_has_source = True else: try: if 'src' in str(tag): tag_source = tag.get('src')", "or string (one style)' ) tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag(", "'style' in meta['meta']: if type(meta['meta']['style']) is str: styles = [meta['meta']['style']] elif type(meta['meta']['style']) is", "template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag in tags.items(): soup.find(tag_loc).append(tag)", ") for style in styles } for style in styles: shutil.copy( style, os.path.join(self.temp_path,", "tag in tags.items(): soup.find(tag_loc).append(tag) for funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script',", ") with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab:", "self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port)", 
"meta['config']['opentab'] self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self):", "in str(tag): tag_source = tag.get('href') is_tag_has_source = True else: try: if 'src' in", "pathlib from bs4 import BeautifulSoup from yaplee.errors import UnknownTemplateValue from yaplee.js.converter import JSFunc", "class Server: def __init__(self, meta) -> None: self.port = meta['config']['port'] self.templates = meta['templates']", "tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions = {i.__name__:i for i in meta['meta']['functions']} elif", "'' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href'", "template = template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1]", "tag_source = tag.get('href') is_tag_has_source = True else: try: if 'src' in str(tag): tag_source", "style in styles } for style in styles: shutil.copy( style, os.path.join(self.temp_path, style) )", "self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files = []", "{i.__name__:i for i in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style']) is str:", "type(meta['meta']['style']) is list: styles = meta['meta']['style'] else: raise UnknownTemplateValue( 'template style must be", "yaplee.js.converter import JSFunc class Server: def __init__(self, meta) -> None: self.port = meta['config']['port']", "js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+", "soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+ ' }' ) file.truncate(0) file.write(soup.prettify()) del", "styles = [meta['meta']['style']] elif type(meta['meta']['style']) is list: styles = meta['meta']['style'] else: raise UnknownTemplateValue(", "as file: nohtml_base = file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}',", "os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as file: template_data = file.read() soup =", "str: styles = [meta['meta']['style']] elif type(meta['meta']['style']) is list: styles = meta['meta']['style'] else: raise", "= file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if not", "file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3'", "str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source", "elif type(meta['meta']['style']) is list: styles = meta['meta']['style'] else: raise UnknownTemplateValue( 'template style must", "'+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def remove_yaplee_dir(self): if os.path.isdir(os.path.join(tempfile.gettempdir(), self.tempuuid)): shutil.rmtree(os.path.join(tempfile.gettempdir(),", "= 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for style in", "is_port_open(self): a_socket = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection) a_socket.close()", "os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags = '', {} if 'tags' in", "def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if", "meta['templates'] self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent)", "import BeautifulSoup from yaplee.errors import UnknownTemplateValue from yaplee.js.converter import JSFunc class Server: def", "style in styles: shutil.copy( style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as file:", "nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if not self.templates else '<h4>Avaliable paths :", "= tag.get('src') is_tag_has_source = True except: continue if is_tag_has_source and ('://' not in", "from yaplee.errors import UnknownTemplateValue from yaplee.js.converter import JSFunc class Server: def __init__(self, meta)", "' }' ) file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in generated_files:", "nohtml_base = file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if", "}' ) file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with", "file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag in tags.items():", "= to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags", "href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'], i.split('-_-')[0] if not j['name']", "del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if not self.templates else '<h4>Avaliable", "self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid,", "if j['load_name'] == None else j['load_name'], i.split('-_-')[0] if not j['name'] else j['name'].title() )", "{ str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for style in styles }", "self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket", "a_socket.close() return not (not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(),", "'href' in str(tag): tag_source = tag.get('href') is_tag_has_source = True else: try: if 'src'", "not in tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in", "js_functions = {i.__name__:i for i in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style'])", "def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open = a_socket.connect_ex(local_connection)", "in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: 
nohtml_base = file.read() file.close()", "style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as file: template_data = file.read() soup", "{} for template, meta in self.templates.items(): template = template.split('-_-')[0] to_copy_path = meta['load_name'] if", "= False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href' in str(tag):", "file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run(", "= a_socket.connect_ex(local_connection) a_socket.close() return not (not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15] path", "else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if", "if 'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta, tag in tags.items():", "if tag_name in ['link']: if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source =", "JSFunc class Server: def __init__(self, meta) -> None: self.port = meta['config']['port'] self.templates =", "os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True", "meta['meta']['style'] else: raise UnknownTemplateValue( 'template style must be list or string (one style)'", "meta['load_name'] if meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep))", "shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del", "self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1',", "tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if 'href' in str(tag): tag_source =", "= True else: try: if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source =", "is_tag_has_source and ('://' not in tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) )", "file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', '' if not self.templates else '<h4>Avaliable paths", "'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def remove_yaplee_dir(self): if", "def generate_files(self): generated_files = [] js_functions = {} for template, meta in self.templates.items():", "-m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def remove_yaplee_dir(self): if os.path.isdir(os.path.join(tempfile.gettempdir(),", "href=style ) for style in styles } for style in styles: shutil.copy( style,", ") if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions'", "None else j['load_name'], i.split('-_-')[0] if not j['name'] else j['name'].title() ) for i, j", "import time import uuid import socket import shutil import subprocess import pathlib from", "if not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\"", "999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for style in styles } for style", "tag_loc, 
tags = meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source = '' is_tag_has_source", "not j['name'] else j['name'].title() ) for i, j in self.templates.items()]) ) ) with", "= '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if tag_name in ['link']: if", "-> None: self.port = meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree'] self.opentab =", "j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def", "tag_source = tag.get('src') is_tag_has_source = True except: continue if is_tag_has_source and ('://' not", "'assets', 'no-index.html.py'), 'r+') as file: nohtml_base = file.read() file.close() del file nohtml_base =", "path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self):", "open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: nohtml_base = file.read() file.close() del file nohtml_base", "i.split('-_-')[0] if not j['name'] else j['name'].title() ) for i, j in self.templates.items()]) )", "path def generate_files(self): generated_files = [] js_functions = {} for template, meta in", "with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port)))", "from yaplee.js.converter import JSFunc class Server: def __init__(self, meta) -> None: self.port =", "True else: try: if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source = True", "styles: shutil.copy( style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as file: template_data =", "for style in styles } for style in styles: shutil.copy( style, os.path.join(self.temp_path, style)", "import subprocess import pathlib from bs4 import BeautifulSoup from yaplee.errors import UnknownTemplateValue from", "file: nohtml_base = file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths %}', ''", "else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True ) def remove_yaplee_dir(self):", "in meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source =", "%}', '' if not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration:", "os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files = [] js_functions = {} for", "shutil import subprocess import pathlib from bs4 import BeautifulSoup from yaplee.errors import UnknownTemplateValue", "= uuid.uuid1().hex[:15] path = os.path.join(tempfile.gettempdir(), self.tempuuid) if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path", "tag.get('src') is_tag_has_source = True except: continue if is_tag_has_source and ('://' not in tag_source", "not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format(", "meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid = ''", "shutil.copy( style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as file: template_data = file.read()", "tags = meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source = '' 
is_tag_has_source =", "'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta, tag in tags.items(): tag_source", "BeautifulSoup(template_data, 'html.parser') for tagname, tag in tags.items(): soup.find(tag_loc).append(tag) for funcname, function in js_functions.items():", "os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue)", "self.opentab = meta['config']['opentab'] self.tempuuid = '' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp()", "is_tag_has_source = True else: try: if 'src' in str(tag): tag_source = tag.get('src') is_tag_has_source", "list or string (one style)' ) tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('',", "and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue =", "string (one style)' ) tag_loc, tags = 'head', { str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link',", "str(JSFunc(function))+ ' }' ) file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in", "else: raise UnknownTemplateValue( 'template style must be list or string (one style)' )", "in tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs:", "= str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection", "webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name == 'posix' else 'python')+'", "tag in tags.items(): tag_source = '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if", "in self.templates.items(): template = template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else template to_copy_path", "file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as", "start(self): self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name", "'' if not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\"", "if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"',", "if not os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files = [] js_functions", "tags.items(): soup.find(tag_loc).append(tag) for funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script',", "= meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid =", "import uuid import socket import shutil import subprocess import pathlib from bs4 import", "style) ) with open(template_to_copy, 'r+') as file: template_data = file.read() soup = BeautifulSoup(template_data,", "tags = '', {} if 'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']() for", "uuid 
import socket import shutil import subprocess import pathlib from bs4 import BeautifulSoup", "in tags.items(): tag_source = '' is_tag_has_source = False tag_name = str(tag_meta.split('-_-')[0]) if tag_name", "as file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag in", "import os import random import tempfile import webbrowser import time import uuid import", "['link']: if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source = True else: try:", "in styles } for style in styles: shutil.copy( style, os.path.join(self.temp_path, style) ) with", "rel='stylesheet', href=style ) for style in styles } for style in styles: shutil.copy(", "str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag( 'link', rel='stylesheet', href=style ) for style in styles } for", "be list or string (one style)' ) tag_loc, tags = 'head', { str(random.randint(111111,", "except: continue if is_tag_has_source and ('://' not in tag_source and tag_source): shutil.copy( tag_source,", "file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag in tags.items(): soup.find(tag_loc).append(tag) for funcname,", "generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: nohtml_base = file.read() file.close() del", "file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets',", "in str(tag): tag_source = tag.get('src') is_tag_has_source = True except: continue if is_tag_has_source and", "j['load_name'] == None else j['load_name'], i.split('-_-')[0] if not j['name'] else j['name'].title() ) for", "'r+') as file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser') for tagname, tag", "if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: nohtml_base", "'', {} if 'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta, tag", "os.path.isdir(path): os.mkdir(path) return self.tempuuid, path def generate_files(self): generated_files = [] js_functions = {}", "paths : {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] ==", "del file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+')", "file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'),", "in tags.items(): soup.find(tag_loc).append(tag) for funcname, function in js_functions.items(): unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id))", "= tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions = {i.__name__:i for", "import webbrowser import time import uuid import socket import shutil import subprocess import", "import socket import shutil import subprocess import pathlib from bs4 import BeautifulSoup from", "in meta['meta']: js_functions = {i.__name__:i for i in meta['meta']['functions']} elif 'style' in meta['meta']:", "= '', {} if 'tags' in meta['meta']: tag_loc, tags = meta['meta']['tags']() for tag_meta,", "meta['meta']['functions']} elif 'style' in meta['meta']: if 
type(meta['meta']['style']) is str: styles = [meta['meta']['style']] elif", "str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+ ' }' )", "for style in styles: shutil.copy( style, os.path.join(self.temp_path, style) ) with open(template_to_copy, 'r+') as", "else j['name'].title() ) for i, j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'),", "i in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style']) is str: styles =", "{'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+ ' }' ) file.truncate(0) file.write(soup.prettify()) del file", "unique_id = str(uuid.uuid1()).split('-')[0] soup.html.append(soup.new_tag('script', id=unique_id)) soup.find('script', {'id': unique_id}).append( 'function '+funcname+'(){ '+ str(JSFunc(function))+ '", "== 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory \"'+self.temp_path+'\"', shell=True )", "avaliable_paths %}', '' if not self.templates else '<h4>Avaliable paths : {}</h4>'.format( ', '.join(['<a", "'' self.module_path = str(pathlib.Path(__file__).resolve().parent) self.temp_uuid, self.temp_path = self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET,", "= [meta['meta']['style']] elif type(meta['meta']['style']) is list: styles = meta['meta']['style'] else: raise UnknownTemplateValue( 'template", "self.temp_path subprocess.run( ('python3' if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind", "in tag.attrs: tagvalue = tag.get('tagvalue') del tag.attrs['tagvalue'] tag.append(tagvalue) elif 'functions' in meta['meta']: js_functions", "def __init__(self, meta) -> None: self.port = meta['config']['port'] self.templates = meta['templates'] self.tree =", "self.port = meta['config']['port'] self.templates = meta['templates'] self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid", "is_tag_has_source = True except: continue if is_tag_has_source and ('://' not in tag_source and", ") ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file: file.write(nohtml_base) def start(self): self.generate_files() if", "template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy )", "if is_tag_has_source and ('://' not in tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source)", ") with open(template_to_copy, 'r+') as file: template_data = file.read() soup = BeautifulSoup(template_data, 'html.parser')", "'src' in str(tag): tag_source = tag.get('src') is_tag_has_source = True except: continue if is_tag_has_source", "import JSFunc class Server: def __init__(self, meta) -> None: self.port = meta['config']['port'] self.templates", ": {}</h4>'.format( ', '.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None", ") tag_loc, tags = '', {} if 'tags' in meta['meta']: tag_loc, tags =", "self.generate_files() if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name ==", "meta['load_name'] else template to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = 
os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template,", ") file.truncate(0) file.write(soup.prettify()) del file generated_files.append(to_copy_path) if 'index.html' not in generated_files: with open(os.path.join(self.module_path,", "to_copy_path = to_copy_path.split(os.sep)[-1] template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc,", "to_copy_path.replace('\\\\', os.sep)) shutil.copy( template, template_to_copy ) tag_loc, tags = '', {} if 'tags'", "= {} for template, meta in self.templates.items(): template = template.split('-_-')[0] to_copy_path = meta['load_name']", "'r+') as file: nohtml_base = file.read() file.close() del file nohtml_base = nohtml_base.replace('{% avaliable_paths", "subprocess.run( ('python3' if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1", "socket import shutil import subprocess import pathlib from bs4 import BeautifulSoup from yaplee.errors", "for i, j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file:", "self.__gen_yaplee_temp() def is_port_open(self): a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) local_connection = ('127.0.0.1', self.port) port_open =", "= True except: continue if is_tag_has_source and ('://' not in tag_source and tag_source):", "and ('://' not in tag_source and tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if", "'index.html' not in generated_files: with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file: nohtml_base =", "tag_source): shutil.copy( tag_source, os.path.join(self.temp_path, tag_source) ) if 'tagvalue' in tag.attrs: tagvalue = tag.get('tagvalue')", "os import random import tempfile import webbrowser import time import uuid import socket", "must be list or string (one style)' ) tag_loc, tags = 'head', {", "import pathlib from bs4 import BeautifulSoup from yaplee.errors import UnknownTemplateValue from yaplee.js.converter import", "for i in meta['meta']['functions']} elif 'style' in meta['meta']: if type(meta['meta']['style']) is str: styles", "port_open = a_socket.connect_ex(local_connection) a_socket.close() return not (not port_open) def __gen_yaplee_temp(self): self.tempuuid = uuid.uuid1().hex[:15]", "'.join(['<a style=\"text-decoration: none;\" href=\"{}\" target=\"_blank\">{}</a>'.format( i.split('-_-')[0] if j['load_name'] == None else j['load_name'], i.split('-_-')[0]", "[] js_functions = {} for template, meta in self.templates.items(): template = template.split('-_-')[0] to_copy_path", "meta in self.templates.items(): template = template.split('-_-')[0] to_copy_path = meta['load_name'] if meta['load_name'] else template", "if self.opentab: webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port))) time.sleep(1) yield self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name == 'posix'", "self.templates = meta['templates'] self.tree = meta['tree'] self.opentab = meta['config']['opentab'] self.tempuuid = '' self.module_path", ") for i, j in self.templates.items()]) ) ) with open(os.path.join(self.temp_path, 'index.html'), 'w+') as", "if 'href' in str(tag): tag_source = tag.get('href') is_tag_has_source = True else: try: if", "self.temp_uuid, self.temp_path subprocess.run( ('python3' if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+'", "type(meta['meta']['style']) is str: styles = [meta['meta']['style']] elif 
# yaplee server module: stages templates in a temp directory, injects tags,
# styles and converted JS functions, then serves the result over http.server.
import os
import uuid
import random
import shutil
import socket
import pathlib
import tempfile
import subprocess

from bs4 import BeautifulSoup
from yaplee.errors import UnknownTemplateValue
from yaplee.js.converter import JSFunc


class Server:
    def __init__(self, meta) -> None:
        self.port = meta['config']['port']
        self.templates = meta['templates']
        self.tree = meta['tree']
        self.opentab = meta['config']['opentab']
        self.tempuuid = ''
        self.module_path = str(pathlib.Path(__file__).resolve().parent)
        self.temp_uuid, self.temp_path = self.__gen_yaplee_temp()

    def is_port_open(self):
        # connect_ex returns 0 when something already listens on the port
        a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        local_connection = ('127.0.0.1', self.port)
        port_open = a_socket.connect_ex(local_connection)
        a_socket.close()
        return not (not port_open)

    def __gen_yaplee_temp(self):
        # unique per-run working directory under the system temp folder
        self.tempuuid = uuid.uuid1().hex[:15]
        path = os.path.join(tempfile.gettempdir(), self.tempuuid)
        if not os.path.isdir(path):
            os.mkdir(path)
        return self.tempuuid, path

    def generate_files(self):
        generated_files = []
        js_functions = {}
        for template, meta in self.templates.items():
            template = template.split('-_-')[0]
            to_copy_path = meta['load_name'] if meta['load_name'] else template
            to_copy_path = to_copy_path.split(os.sep)[-1]
            template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\', os.sep))
            shutil.copy(template, template_to_copy)
            tag_loc, tags = '', {}
            if 'tags' in meta['meta']:
                tag_loc, tags = meta['meta']['tags']()
                for tag_meta, tag in tags.items():
                    tag_source = ''
                    is_tag_has_source = False
                    tag_name = str(tag_meta.split('-_-')[0])
                    try:
                        if tag_name in ['link']:
                            if 'href' in str(tag):
                                tag_source = tag.get('href')
                                is_tag_has_source = True
                        elif 'src' in str(tag):
                            tag_source = tag.get('src')
                            is_tag_has_source = True
                    except Exception:
                        continue
                    # copy local (non-URL) assets next to the staged template
                    if is_tag_has_source and ('://' not in tag_source and tag_source):
                        shutil.copy(tag_source, os.path.join(self.temp_path, tag_source))
                    if 'tagvalue' in tag.attrs:
                        tagvalue = tag.get('tagvalue')
                        del tag.attrs['tagvalue']
                        tag.append(tagvalue)
            elif 'functions' in meta['meta']:
                js_functions = {i.__name__: i for i in meta['meta']['functions']}
            elif 'style' in meta['meta']:
                if type(meta['meta']['style']) is str:
                    styles = [meta['meta']['style']]
                elif type(meta['meta']['style']) is list:
                    styles = meta['meta']['style']
                else:
                    raise UnknownTemplateValue(
                        'template style must be list or string (one style)'
                    )
                tag_loc, tags = 'head', {
                    str(random.randint(111111, 999999)): BeautifulSoup('', 'html.parser').new_tag(
                        'link', rel='stylesheet', href=style
                    ) for style in styles
                }
                for style in styles:
                    shutil.copy(style, os.path.join(self.temp_path, style))
            with open(template_to_copy, 'r+') as file:
                template_data = file.read()
                soup = BeautifulSoup(template_data, 'html.parser')
                for tagname, tag in tags.items():
                    soup.find(tag_loc).append(tag)
                for funcname, function in js_functions.items():
                    unique_id = str(uuid.uuid1()).split('-')[0]
                    soup.html.append(soup.new_tag('script', id=unique_id))
                    soup.find('script', {'id': unique_id}).append(
                        'function ' + funcname + '(){ ' + str(JSFunc(function)) + ' }'
                    )
                file.seek(0)      # rewind before truncating so the rewrite starts at byte 0
                file.truncate(0)
                file.write(soup.prettify())
            del file
            generated_files.append(to_copy_path)
        if 'index.html' not in generated_files:
            # no index template given: generate one listing the available paths
            with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file:
                nohtml_base = file.read()
            file.close()
            del file
            nohtml_base = nohtml_base.replace(
                '{% avaliable_paths %}',
                '' if not self.templates else '<h4>Avaliable paths : {}</h4>'.format(
                    ', '.join(['<a style="text-decoration: none;" href="{}" target="_blank">{}</a>'.format(
                        i.split('-_-')[0] if j['load_name'] is None else j['load_name'],
                        i.split('-_-')[0] if not j['name'] else j['name'].title()
                    ) for i, j in self.templates.items()])
                )
            )
            with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file:
                file.write(nohtml_base)

    def start(self):
        self.generate_files()
        yield self.temp_uuid, self.temp_path
        subprocess.run(
            ('python3' if os.name == 'posix' else 'python')
            + ' -m http.server ' + str(self.port)
            + ' --bind 127.0.0.1 --directory "' + self.temp_path + '"',
            shell=True
        )

    def remove_yaplee_dir(self):
        if os.path.isdir(os.path.join(tempfile.gettempdir(), self.tempuuid)):
            shutil.rmtree(os.path.join(tempfile.gettempdir(), self.tempuuid))
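A minimal sketch of how this class might be driven, assuming a meta dict shaped like the fields the constructor and generate_files read; the template entry ('home.html') and its settings are hypothetical, not taken from the source:

# Hypothetical wiring for the Server above; key names mirror the attributes
# the class reads, and 'home.html' is a made-up template file.
meta = {
    'config': {'port': 8000, 'opentab': False},
    'tree': {},
    'templates': {
        'home.html-_-1': {
            'load_name': None,   # keep the template's own file name
            'name': 'home',
            'meta': {},          # no extra tags, functions or styles
        },
    },
}

server = Server(meta)
for temp_uuid, temp_path in server.start():
    print('staged at', temp_path)   # resuming the generator blocks in http.server
server.remove_yaplee_dir()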
[ "dict.setdefault(i, []) for key in tags: for structure in file: if structure.info['config_type'] ==", "# import module and read xyz file from ase.io import read, write file=read('last3.xyz',", "[] for structure in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract", "in tags: tags.append(structure.info['config_type']) # extract unique tags and energy sigma dict={} for i", "for structure in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique", "unique tags and energy sigma dict={} for i in tags: dict.setdefault(i, []) for", "index=\":\") # create list of tags tags = [] for structure in file:", "if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique tags and energy sigma", "dict={} for i in tags: dict.setdefault(i, []) for key in tags: for structure", "for i in tags: dict.setdefault(i, []) for key in tags: for structure in", "in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique tags and", "read, write file=read('last3.xyz', index=\":\") # create list of tags tags = [] for", "of tags tags = [] for structure in file: if structure.info['config_type'] not in", "not in tags: tags.append(structure.info['config_type']) # extract unique tags and energy sigma dict={} for", "from ase.io import read, write file=read('last3.xyz', index=\":\") # create list of tags tags", "for structure in file: if structure.info['config_type'] == key and structure.info['energy_sigma'] not in dict.get(key):", "in tags: dict.setdefault(i, []) for key in tags: for structure in file: if", "= [] for structure in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) #", "structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique tags and energy sigma dict={}", "file=read('last3.xyz', index=\":\") # create list of tags tags = [] for structure in", "sigma dict={} for i in tags: dict.setdefault(i, []) for key in tags: for", "read xyz file from ase.io import read, write file=read('last3.xyz', index=\":\") # create list", "# extract unique tags and energy sigma dict={} for i in tags: dict.setdefault(i,", "tags tags = [] for structure in file: if structure.info['config_type'] not in tags:", "create list of tags tags = [] for structure in file: if structure.info['config_type']", "tags: dict.setdefault(i, []) for key in tags: for structure in file: if structure.info['config_type']", "for key in tags: for structure in file: if structure.info['config_type'] == key and", "tags.append(structure.info['config_type']) # extract unique tags and energy sigma dict={} for i in tags:", "structure in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique tags", "i in tags: dict.setdefault(i, []) for key in tags: for structure in file:", "and read xyz file from ase.io import read, write file=read('last3.xyz', index=\":\") # create", "xyz file from ase.io import read, write file=read('last3.xyz', index=\":\") # create list of", "write file=read('last3.xyz', index=\":\") # create list of tags tags = [] for structure", "in tags: for structure in file: if structure.info['config_type'] == key and structure.info['energy_sigma'] not", "and energy sigma dict={} for i in tags: dict.setdefault(i, []) for key in", "file from ase.io import read, write file=read('last3.xyz', index=\":\") # create 
list of tags", "import module and read xyz file from ase.io import read, write file=read('last3.xyz', index=\":\")", "tags = [] for structure in file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type'])", "energy sigma dict={} for i in tags: dict.setdefault(i, []) for key in tags:", "module and read xyz file from ase.io import read, write file=read('last3.xyz', index=\":\") #", "import read, write file=read('last3.xyz', index=\":\") # create list of tags tags = []", "tags and energy sigma dict={} for i in tags: dict.setdefault(i, []) for key", "tags: for structure in file: if structure.info['config_type'] == key and structure.info['energy_sigma'] not in", "list of tags tags = [] for structure in file: if structure.info['config_type'] not", "[]) for key in tags: for structure in file: if structure.info['config_type'] == key", "structure in file: if structure.info['config_type'] == key and structure.info['energy_sigma'] not in dict.get(key): dict[key].append(structure.info['energy_sigma'])", "file: if structure.info['config_type'] not in tags: tags.append(structure.info['config_type']) # extract unique tags and energy", "tags: tags.append(structure.info['config_type']) # extract unique tags and energy sigma dict={} for i in", "# create list of tags tags = [] for structure in file: if", "extract unique tags and energy sigma dict={} for i in tags: dict.setdefault(i, [])", "key in tags: for structure in file: if structure.info['config_type'] == key and structure.info['energy_sigma']", "ase.io import read, write file=read('last3.xyz', index=\":\") # create list of tags tags =" ]
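The nested loop above rescans the whole trajectory once per tag; the same mapping can be built in a single pass. A sketch with a set-valued defaultdict, assuming the same file name and info keys as above:

from collections import defaultdict

from ase.io import read

frames = read('last3.xyz', index=':')
sigma_by_tag = defaultdict(set)
for structure in frames:
    sigma_by_tag[structure.info['config_type']].add(structure.info['energy_sigma'])

for tag, values in sorted(sigma_by_tag.items()):
    print(tag, sorted(values))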
[ "global img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255,", "flags, param): global l global img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp,", "y, flags, param): global l global img global img_cp if event == cv2.EVENT_LBUTTONDOWN:", "cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp =", "cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0,", "l global img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5,", "None img_cp = None def draw_circle(event, x, y, flags, param): global l global", "= cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp", "y), 5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) ==", "M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp)", "300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image',", "def draw_circle(event, x, y, flags, param): global l global img global img_cp if", "img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight(): global img global img_cp", "cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight(): global img", "l.clear() def road_straight(): global img global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img,", "0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1 =", "pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])", "0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst =", "import numpy as np from matplotlib import pyplot as plt l: list =", "as np from matplotlib import pyplot as plt l: list = [] img", "0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1", "300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight(): global", "img.copy() l.clear() def road_straight(): global img global img_cp img = cv2.imread('road.jpg') img =", "list = [] img = None img_cp = None def draw_circle(event, x, y,", "img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)", "= [] img = None img_cp = None def draw_circle(event, x, y, flags,", "len(l) == 4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300, 0],", "cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear()", "numpy as np from matplotlib import pyplot as plt l: list = []", "x, y, flags, param): global l global img global img_cp if event ==", "l: list = [] img = None img_cp = None def draw_circle(event, x,", "= cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy()", "event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1) l.append([x, y])", "= cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75,", "= None def draw_circle(event, x, y, flags, param): global l global img global", "if len(l) == 4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300,", "= img.copy() l.clear() def road_straight(): 
global img global img_cp img = cv2.imread('road.jpg') img", "as plt l: list = [] img = None img_cp = None def", "cv2 import numpy as np from matplotlib import pyplot as plt l: list", "global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0),", "M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def", "= cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final',", "l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1 = np.float32(l) pts2", "(x, y), 5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l)", "dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy()", "np from matplotlib import pyplot as plt l: list = [] img =", "4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0, 300],", "None def draw_circle(event, x, y, flags, param): global l global img global img_cp", "0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) cv2.setMouseCallback('image', draw_circle) cv2.waitKey()", "= np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2)", "y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1 = np.float32(l) pts2 =", "fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) cv2.setMouseCallback('image', draw_circle) cv2.waitKey() cv2.destroyAllWindows()", "== cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image',", "def road_straight(): global img global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000,", "img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image',", "-1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l) pts1 = np.float32(l)", "img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1)", "= np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M", "= None img_cp = None def draw_circle(event, x, y, flags, param): global l", "cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst)", "plt l: list = [] img = None img_cp = None def draw_circle(event,", "interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) cv2.setMouseCallback('image', draw_circle) cv2.waitKey() cv2.destroyAllWindows() return road_straight()", "global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img,", "[300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img,", "300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300))", "image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight(): global img global", "cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) cv2.setMouseCallback('image',", "1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() 
cv2.namedWindow('image')", "cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75,", "[300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original", "pyplot as plt l: list = [] img = None img_cp = None", "(255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4: print(l)", "np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst", "draw_circle(event, x, y, flags, param): global l global img global img_cp if event", "img_cp = img.copy() l.clear() def road_straight(): global img global img_cp img = cv2.imread('road.jpg')", "param): global l global img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x,", "cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if", "(300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight():", "pts2) dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp", "print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300,", "0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M,", "img global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img =", "= cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img)", "import cv2 import numpy as np from matplotlib import pyplot as plt l:", "pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1,", "dst = cv2.warpPerspective(img, M, (300, 300)) cv2.imshow('Original image', img_cp) cv2.imshow('Final', dst) img_cp =", "fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) cv2.setMouseCallback('image', draw_circle) cv2.waitKey() cv2.destroyAllWindows() return", "import pyplot as plt l: list = [] img = None img_cp =", "road_straight(): global img global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000))", "global l global img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y),", "cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp)", "matplotlib import pyplot as plt l: list = [] img = None img_cp", "if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1) l.append([x,", "img_cp) if len(l) == 4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0],", "== 4: print(l) pts1 = np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0,", "5, (255, 0, 0), -1) l.append([x, y]) cv2.imshow('image', img_cp) if len(l) == 4:", "img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0, 0),", "img_cp = None def draw_circle(event, x, y, flags, param): global l global img", "from matplotlib import pyplot as plt l: list = [] img = None", "np.float32(l) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M =", "img = None img_cp = None def draw_circle(event, x, y, flags, param): global", "cv2.imshow('Final', dst) img_cp = img.copy() l.clear() def road_straight(): global img global img_cp img", "(0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST) img_cp = img.copy() cv2.namedWindow('image') cv2.imshow('image', img) 
cv2.setMouseCallback('image', draw_circle)", "img global img_cp if event == cv2.EVENT_LBUTTONDOWN: cv2.circle(img_cp, (x, y), 5, (255, 0,", "[] img = None img_cp = None def draw_circle(event, x, y, flags, param):", "global img global img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img", "img_cp img = cv2.imread('road.jpg') img = cv2.resize(img, dsize=(1000, 1000)) img = cv2.resize(img, (0,", "dst) img_cp = img.copy() l.clear() def road_straight(): global img global img_cp img =", "[0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) dst = cv2.warpPerspective(img, M, (300," ]
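Note that the four clicks have to arrive in the corner order of pts2 (top-left, top-right, bottom-left, bottom-right); otherwise the warp mirrors or flips the crop. A quick self-check of the transform, using hypothetical click coordinates:

import cv2
import numpy as np

pts1 = np.float32([[120, 340], [880, 330], [60, 700], [940, 710]])  # hypothetical clicks
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)

# each clicked point should land exactly on its target corner
mapped = cv2.perspectiveTransform(pts1.reshape(-1, 1, 2), M).reshape(-1, 2)
print(np.allclose(mapped, pts2, atol=1e-3))  # True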
[ "pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader = tei_reader.TeiReader()", "pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader = tei_reader.TeiReader() for (tei,", "= reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as f: diffs = list(diff", "Differ from xml.dom import minidom from os import linesep from .context import get_files,", "reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as f: diffs = list(diff for", ".context import get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ", "linesep from .context import get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase): def", "os import linesep from .context import get_files, tei_reader from pprint import pprint class", "open(expected, encoding='utf-8') as f: diffs = list(diff for diff in differ.compare( [line.strip() for", "for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2] != '", "class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader = tei_reader.TeiReader() for (tei, expected)", "= list(diff for diff in differ.compare( [line.strip() for line in f.readlines()], [line.strip() for", "differ = Differ() reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora", "f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2]", "reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed", "as f: diffs = list(diff for diff in differ.compare( [line.strip() for line in", "= tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed =", "[line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2] !=", "transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2] != ' ']), 0, \"{0}", "import unittest from difflib import Differ from xml.dom import minidom from os import", "= Differ() reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora =", "diff in diffs if diff[0:2] != ' ']), 0, \"{0} not transformed as", "zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as f:", "with open(expected, encoding='utf-8') as f: diffs = list(diff for diff in differ.compare( [line.strip()", "import pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader = tei_reader.TeiReader() for", "<gh_stars>10-100 import unittest from difflib import Differ from xml.dom import minidom from os", "from os import linesep from .context import get_files, tei_reader from pprint import pprint", "[line.strip() for line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff", "line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2] != ' ']),", "in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if diff[0:2] != ' ']), 
0,", "differ.compare( [line.strip() for line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for", "from pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader =", "minidom from os import linesep from .context import get_files, tei_reader from pprint import", "(tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with open(expected,", "unittest from difflib import Differ from xml.dom import minidom from os import linesep", "in diffs if diff[0:2] != ' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei,", "in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs if", "from xml.dom import minidom from os import linesep from .context import get_files, tei_reader", "expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8')", "' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs))) if __name__ == '__main__':", "difflib import Differ from xml.dom import minidom from os import linesep from .context", "get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ()", "import minidom from os import linesep from .context import get_files, tei_reader from pprint", "test_files(self): differ = Differ() reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')):", "TestText(unittest.TestCase): def test_files(self): differ = Differ() reader = tei_reader.TeiReader() for (tei, expected) in", "import linesep from .context import get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase):", "diffs = list(diff for diff in differ.compare( [line.strip() for line in f.readlines()], [line.strip()", "diffs if diff[0:2] != ' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs)))", "encoding='utf-8') as f: diffs = list(diff for diff in differ.compare( [line.strip() for line", "transformed = corpora.text with open(expected, encoding='utf-8') as f: diffs = list(diff for diff", "!= ' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs))) if __name__ ==", "= corpora.text with open(expected, encoding='utf-8') as f: diffs = list(diff for diff in", "list(diff for diff in differ.compare( [line.strip() for line in f.readlines()], [line.strip() for line", "self.assertEqual(len([diff for diff in diffs if diff[0:2] != ' ']), 0, \"{0} not", "diff[0:2] != ' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs))) if __name__", "in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as", "get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as f: diffs", "corpora.text with open(expected, encoding='utf-8') as f: diffs = list(diff for diff in differ.compare(", "for line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in", "in differ.compare( [line.strip() for line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff", "for diff in 
differ.compare( [line.strip() for line in f.readlines()], [line.strip() for line in", "line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)])) self.assertEqual(len([diff for diff in diffs", "for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text with", "import Differ from xml.dom import minidom from os import linesep from .context import", "from .context import get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase): def test_files(self):", "import get_files, tei_reader from pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ =", "xml.dom import minidom from os import linesep from .context import get_files, tei_reader from", "f: diffs = list(diff for diff in differ.compare( [line.strip() for line in f.readlines()],", "Differ() reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei)", "for diff in diffs if diff[0:2] != ' ']), 0, \"{0} not transformed", "corpora = reader.read_file(tei) transformed = corpora.text with open(expected, encoding='utf-8') as f: diffs =", "']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs))) if __name__ == '__main__': unittest.main()", "if diff[0:2] != ' ']), 0, \"{0} not transformed as expected:\\n{1}\".format(tei, linesep.join(diffs))) if", "from difflib import Differ from xml.dom import minidom from os import linesep from", "diff in differ.compare( [line.strip() for line in f.readlines()], [line.strip() for line in transformed.splitlines(keepends=False)]))", "tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'), get_files('out.txt')): corpora = reader.read_file(tei) transformed = corpora.text", "def test_files(self): differ = Differ() reader = tei_reader.TeiReader() for (tei, expected) in zip(get_files('tei.xml'),", "tei_reader from pprint import pprint class TestText(unittest.TestCase): def test_files(self): differ = Differ() reader" ]
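The diff[0:2] != '  ' filter relies on difflib.Differ prefixing unchanged lines with two spaces, so anything else is a real difference. A tiny demonstration of the prefixes it emits:

from difflib import Differ

diffs = list(Differ().compare(['a', 'b'], ['a', 'c']))
print(diffs)                                 # ['  a', '- b', '+ c']
print([d for d in diffs if d[0:2] != '  '])  # ['- b', '+ c']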
[ "<gh_stars>10-100 import scryptos.crypto.attack.rsautil as rsautil import scryptos.crypto.attack.knapsackutil as knapsackutil import scryptos.crypto.attack.prngutil as prngutil" ]
[ "* import p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import p1.pp1.a1 import", "import * import p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import p1.pp1.a1", "import p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import p1.pp1.a1 import p1.pp1.a2", "p1 import * import p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import", "p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import p1.pp1.a1 import p1.pp1.a2 import", "from p1 import * import p1.m1 import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main()", "import p1.m2 import p1.m3 import p1.m4 p1.m4.mm_main() import p1.pp1.a1 import p1.pp1.a2 import p1.pp1.a3", "<reponame>mheanng/PythonNote from p1 import * import p1.m1 import p1.m2 import p1.m3 import p1.m4" ]
[ "glob import glob from abc import ABC, abstractmethod class DataBase(ABC): @abstractmethod def make_tidy(self):", "\"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def parse_data(self)", "os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name):", "-> pd.DataFrame: pass class MLS(DataBase): ext = \".flac\" basename = \"multi_speech_librespeech\" def __init__(self,", "-> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df =", "= data_train_dir self.test_path = data_test_dir self.dev_path = data_dev_dir def _create_path(self, path_type:str, audio_code:str): match", "type_): df = pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\",", "<reponame>rocabrera/audio-learning import re import os import json import pandas as pd from glob", "return df.filter([\"file\", \"text\", \"type\"]) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train =", "pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_,", "df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\" basename = \"common_voice\" def __init__(self,", "= \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def", "self.dev_path = data_dev_dir def _create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\",", "df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\", \"type\"]) def make_tidy(self):", "__init__(self, data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir self.test_path = data_test_dir self.dev_path = data_dev_dir", "import pandas as pd from glob import glob from abc import ABC, abstractmethod", "= re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str)", "abstractmethod class DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass", "= data_dev_dir def _create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1),", "= \"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir self.test_path = data_test_dir", "\"train.tsv\") self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\")", "\"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", 
\"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) })", "def _parse_type(self, df_path, type_): df = pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)})", "os.path.join(self.audios_path, audio_name) def _parse_type(self, df_path, type_): df = pd.read_csv(df_path, sep = \"\\t\") return", "= os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x:", "-> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path,", "def __init__(self, main_path): self.train_path = os.path.join(main_path, \"train.tsv\") self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path =", "pass class MLS(DataBase): ext = \".flac\" basename = \"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir,", "pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\" basename = \"common_voice\" def", "match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\")", "import json import pandas as pd from glob import glob from abc import", "os import json import pandas as pd from glob import glob from abc", "glob from abc import ABC, abstractmethod class DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod", "\".flac\" basename = \"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir self.test_path", "\"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df", "self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return os.path.join(self.audios_path,", "ext = \".mp3\" basename = \"common_voice\" def __init__(self, main_path): self.train_path = os.path.join(main_path, \"train.tsv\")", "__init__(self, main_path): self.train_path = os.path.join(main_path, \"train.tsv\") self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path,", "df.filter([\"file\", \"text\", \"type\"]) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path,", "def parse_data(self) -> pd.DataFrame: pass class MLS(DataBase): ext = \".flac\" basename = \"multi_speech_librespeech\"", "\"common_voice\" def __init__(self, main_path): self.train_path = os.path.join(main_path, \"train.tsv\") self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path", "re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) ->", "pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\")", "return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame:", 
"CommonVoice(DataBase): ext = \".mp3\" basename = \"common_voice\" def __init__(self, main_path): self.train_path = os.path.join(main_path,", "abc import ABC, abstractmethod class DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self)", "data_test_dir self.dev_path = data_dev_dir def _create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type,", "= self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class", "type_:str) -> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df", "basename = \"common_voice\" def __init__(self, main_path): self.train_path = os.path.join(main_path, \"train.tsv\") self.test_path = os.path.join(main_path,", "_parse_type(self, df_path, type_): df = pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"})", "self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return os.path.join(self.audios_path, audio_name) def _parse_type(self, df_path,", "pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\",", "parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,\"test\") df_dev = self._parse_type(self.dev_path,\"dev\")", "match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label = os.path.join(path_type,", "os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label", "pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self):", "json import pandas as pd from glob import glob from abc import ABC,", "def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test =", ".rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train =", "\"type\"])) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test", "def _create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code,", "path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def", "def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass class MLS(DataBase): ext =", "= os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name): 
return os.path.join(self.audios_path, audio_name)", "match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str,", "df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext =", "data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir self.test_path = data_test_dir self.dev_path = data_dev_dir def", "audio_name): return os.path.join(self.audios_path, audio_name) def _parse_type(self, df_path, type_): df = pd.read_csv(df_path, sep =", "os.path.join(main_path, \"train.tsv\") self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path,", "@abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass class MLS(DataBase): ext", "make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,", "make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass class MLS(DataBase): ext = \".flac\"", ".filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path,", "data_dev_dir def _create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2),", "= df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\", \"type\"]) def make_tidy(self): pass", "self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,\"test\") df_dev = self._parse_type(self.dev_path,\"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename)", "\"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return", "df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return", "from glob import glob from abc import ABC, abstractmethod class DataBase(ABC): @abstractmethod def", "\"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label =", "x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\", \"type\"]) def make_tidy(self): pass def parse_data(self) ->", "def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev", "parse_data(self) -> pd.DataFrame: pass class MLS(DataBase): ext = \".flac\" basename = \"multi_speech_librespeech\" def", "df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename)", "df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return", "class MLS(DataBase): 
ext = \".flac\" basename = \"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir, data_dev_dir):", "pd.DataFrame: pass class MLS(DataBase): ext = \".flac\" basename = \"multi_speech_librespeech\" def __init__(self, data_train_dir,", "_create_path(self, path_type:str, audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext]))", "\"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir self.test_path = data_test_dir self.dev_path", "self.ext])) def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df =", "audio_name) def _parse_type(self, df_path, type_): df = pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_,", "= self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,\"test\") df_dev = self._parse_type(self.dev_path,\"dev\") return pd.concat([df_train, df_test, df_dev],", "sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\", \"type\"])", "pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\")", "= self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train,", "DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass class MLS(DataBase):", "os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return os.path.join(self.audios_path, audio_name) def _parse_type(self, df_path, type_): df", "make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,\"test\")", "ABC, abstractmethod class DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame:", "class DataBase(ABC): @abstractmethod def make_tidy(self): pass @abstractmethod def parse_data(self) -> pd.DataFrame: pass class", "self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test,", "return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def parse_data(self) ->", "os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x))", "def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame: path_label = os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label,", "\"text\", \"type\"])) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\")", "self._parse_type(self.test_path, \"test\") df_dev = self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test, df_dev], 
ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase):", "pass def parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path,\"test\") df_dev", "parse_data(self) -> pd.DataFrame: df_train = self._parse_type(self.train_path, \"train\") df_test = self._parse_type(self.test_path, \"test\") df_dev =", "\"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\" basename =", "path_label = os.path.join(path_type, \"transcripts.txt\") df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda", "as pd from glob import glob from abc import ABC, abstractmethod class DataBase(ABC):", "= os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self,", "audio_code:str): match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code) return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext])) def _parse_type(self,", "(df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"])) def make_tidy(self): pass def parse_data(self) -> pd.DataFrame:", "= os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return os.path.join(self.audios_path, audio_name) def _parse_type(self, df_path, type_):", "df = pd.read_csv(df_path, sep = \"\\t\") return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)}) .rename(columns={\"sentence\":\"text\"}) .filter([\"file\", \"text\", \"type\"]))", "self.test_path = os.path.join(main_path, \"test.tsv\") self.dev_path = os.path.join(main_path, \"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def", "re import os import json import pandas as pd from glob import glob", "self.train_path = data_train_dir self.test_path = data_test_dir self.dev_path = data_dev_dir def _create_path(self, path_type:str, audio_code:str):", "\"validated.tsv\") self.audios_path = os.path.join(main_path, \"clips\") def _create_path(self, audio_name): return os.path.join(self.audios_path, audio_name) def _parse_type(self,", "= \".flac\" basename = \"multi_speech_librespeech\" def __init__(self, data_train_dir, data_test_dir, data_dev_dir): self.train_path = data_train_dir", "ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\" basename = \"common_voice\" def __init__(self, main_path): self.train_path", "return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\" basename = \"common_voice\"", "= pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\",", "= self._parse_type(self.dev_path, \"dev\") return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename) class CommonVoice(DataBase): ext = \".mp3\"", "\"text\"]) df = df.assign(**{\"type\":type_, \"file\":df.audio_code.apply(lambda x: self._create_path(path_type,x)) }) return df.filter([\"file\", \"text\", \"type\"]) def", "}) return df.filter([\"file\", \"text\", \"type\"]) def 
import re
import os
import json
import pandas as pd
from glob import glob
from abc import ABC, abstractmethod


class DataBase(ABC):
    @abstractmethod
    def make_tidy(self):
        pass

    @abstractmethod
    def parse_data(self) -> pd.DataFrame:
        pass


class MLS(DataBase):
    ext = ".flac"
    basename = "multi_speech_librespeech"

    def __init__(self, data_train_dir, data_test_dir, data_dev_dir):
        self.train_path = data_train_dir
        self.test_path = data_test_dir
        self.dev_path = data_dev_dir

    def _create_path(self, path_type: str, audio_code: str):
        # MLS audio codes look like "<speaker>_<book>_<file>".  The return
        # expression was truncated in the source; it is reconstructed here
        # assuming the standard MLS layout <split>/audio/<speaker>/<book>/.
        match = re.search(r"(\d+)_(\d+)_(\d+)", audio_code)
        return os.path.join(path_type, "audio", match.group(1), match.group(2),
                            audio_code + self.ext)

    def _parse_type(self, path_type: str, type_: str) -> pd.DataFrame:
        path_label = os.path.join(path_type, "transcripts.txt")
        df = pd.read_csv(path_label, sep="\t", header=None,
                         names=["audio_code", "text"])
        df = df.assign(**{"type": type_,
                          "file": df.audio_code.apply(
                              lambda x: self._create_path(path_type, x))})
        return df.filter(["file", "text", "type"])

    def make_tidy(self):
        pass

    def parse_data(self) -> pd.DataFrame:
        df_train = self._parse_type(self.train_path, "train")
        df_test = self._parse_type(self.test_path, "test")
        df_dev = self._parse_type(self.dev_path, "dev")
        return pd.concat([df_train, df_test, df_dev],
                         ignore_index=True).assign(base=self.basename)


class CommonVoice(DataBase):
    ext = ".mp3"
    basename = "common_voice"

    def __init__(self, main_path):
        self.train_path = os.path.join(main_path, "train.tsv")
        self.test_path = os.path.join(main_path, "test.tsv")
        self.dev_path = os.path.join(main_path, "validated.tsv")
        self.audios_path = os.path.join(main_path, "clips")

    def _create_path(self, audio_name):
        return os.path.join(self.audios_path, audio_name)

    def _parse_type(self, df_path, type_):
        df = pd.read_csv(df_path, sep="\t")
        return (df.assign(**{"type": type_,
                             "file": df["path"].apply(self._create_path)})
                  .rename(columns={"sentence": "text"})
                  .filter(["file", "text", "type"]))

    def make_tidy(self):
        pass

    def parse_data(self) -> pd.DataFrame:
        df_train = self._parse_type(self.train_path, "train")
        df_test = self._parse_type(self.test_path, "test")
        df_dev = self._parse_type(self.dev_path, "dev")
        return pd.concat([df_train, df_test, df_dev],
                         ignore_index=True).assign(base=self.basename)
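# Usage sketch for the parsers above; the dataset roots are hypothetical
# placeholders, and make_tidy is still a stub in both classes.
import pandas as pd

mls = MLS("mls/train", "mls/test", "mls/dev")   # hypothetical paths
cv = CommonVoice("common_voice/en")             # hypothetical path

# Each parse_data() returns a tidy frame with file/text/type columns plus
# a `base` column naming the corpus, so the two can be concatenated:
corpus = pd.concat([mls.parse_data(), cv.parse_data()], ignore_index=True)
print(corpus.groupby(["base", "type"]).size())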
[ "pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x =", "font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height = width, 0", "unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width text_y = height font_file, font_size", "unicornhathd.get_shape() text_x = width text_y = height font_file, font_size = FONT font =", "pillow module\\nInstall with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8)", "time from sys import exit try: from PIL import Image, ImageDraw, ImageFont except", "#!/usr/bin/env python import colorsys import time from sys import exit try: from PIL", "height = unicornhathd.get_shape() text_x = width text_y = height font_file, font_size = FONT", "with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height", "script requires the pillow module\\nInstall with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf',", "the pillow module\\nInstall with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270)", "text_y = height font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height", "width, height = unicornhathd.get_shape() text_x = width text_y = height font_file, font_size =", "= height font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height =", "install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x", "import Image, ImageDraw, ImageFont except ImportError: exit('This script requires the pillow module\\nInstall with:", "from PIL import Image, ImageDraw, ImageFont except ImportError: exit('This script requires the pillow", "sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height =", "('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width text_y =", "FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height = width, 0 except KeyboardInterrupt: unicornhathd.off()", "12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width text_y = height", "import colorsys import time from sys import exit try: from PIL import Image,", "sys import exit try: from PIL import Image, ImageDraw, ImageFont except ImportError: exit('This", "width text_y = height font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width,", "PIL import Image, ImageDraw, ImageFont except ImportError: exit('This script requires the pillow module\\nInstall", "= FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height = width, 0 except KeyboardInterrupt:", "unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width text_y = height font_file,", "FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) 
unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width", "font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height = width, 0 except", "import time from sys import exit try: from PIL import Image, ImageDraw, ImageFont", "= width text_y = height font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size)", "pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape()", "ImportError: exit('This script requires the pillow module\\nInstall with: sudo pip install pillow') FONT", "= ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width, height = unicornhathd.get_shape() text_x = width text_y", "module\\nInstall with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12) unicornhathd.rotation(270) unicornhathd.brightness(0.8) width,", "exit try: from PIL import Image, ImageDraw, ImageFont except ImportError: exit('This script requires", "ImageDraw, ImageFont except ImportError: exit('This script requires the pillow module\\nInstall with: sudo pip", "python import colorsys import time from sys import exit try: from PIL import", "except ImportError: exit('This script requires the pillow module\\nInstall with: sudo pip install pillow')", "text_x = width text_y = height font_file, font_size = FONT font = ImageFont.truetype(font_file,", "try: from PIL import Image, ImageDraw, ImageFont except ImportError: exit('This script requires the", "= unicornhathd.get_shape() text_x = width text_y = height font_file, font_size = FONT font", "colorsys import time from sys import exit try: from PIL import Image, ImageDraw,", "import exit try: from PIL import Image, ImageDraw, ImageFont except ImportError: exit('This script", "requires the pillow module\\nInstall with: sudo pip install pillow') FONT = ('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 12)", "exit('This script requires the pillow module\\nInstall with: sudo pip install pillow') FONT =", "from sys import exit try: from PIL import Image, ImageDraw, ImageFont except ImportError:", "height font_file, font_size = FONT font = ImageFont.truetype(font_file, font_size) text_width, text_height = width,", "<gh_stars>0 #!/usr/bin/env python import colorsys import time from sys import exit try: from", "Image, ImageDraw, ImageFont except ImportError: exit('This script requires the pillow module\\nInstall with: sudo", "ImageFont except ImportError: exit('This script requires the pillow module\\nInstall with: sudo pip install" ]
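# The main loop is absent from the source above.  A hedged sketch of a
# typical Unicorn HAT HD text scroller with this setup: render the text
# into a Pillow image, then walk a width-wide window across it.  The
# message string is a placeholder, not recovered from the original.
text = "Hello World!"
text_width += font.getbbox(text)[2]   # right edge of the string (Pillow >= 8)
image = Image.new("RGB", (text_width, height), (0, 0, 0))
draw = ImageDraw.Draw(image)
draw.text((text_x, 0), text, fill=(255, 255, 255), font=font)

try:
    while True:
        for scroll in range(text_width - width):
            for x in range(width):
                for y in range(height):
                    r, g, b = image.getpixel((x + scroll, y))
                    unicornhathd.set_pixel(x, y, r, g, b)
            unicornhathd.show()
            time.sleep(0.05)
except KeyboardInterrupt:
    unicornhathd.off()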
[ "class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH,", "btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click()", "time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3)", "import BasePage import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID,", "def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_3.click() time.sleep(1)", "= (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2", "RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3", "from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH,", "them in different order \"\"\" from pages.base_page import BasePage import time from selenium.webdriver.common.by", "'/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click()", "<gh_stars>0 \"\"\" Radio button page clicking them in different order \"\"\" from pages.base_page", "def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1)", "btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_3.click()", "import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2", "from pages.base_page import BasePage import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1", "click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click()", "= self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2", "= self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click()", "btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1)", "RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, 
'/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1)", "= self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_3.click() time.sleep(1) btn_2.click() time.sleep(1) btn_1.click()", "self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 =", "btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_3.click() time.sleep(1) btn_2.click() time.sleep(1)", "in different order \"\"\" from pages.base_page import BasePage import time from selenium.webdriver.common.by import", "RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def", "(By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3)", "(By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 =", "button page clicking them in different order \"\"\" from pages.base_page import BasePage import", "self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def", "= (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 =", "= (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self):", "self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 =", "RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input')", "btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1)", "pages.base_page import BasePage import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 =", "Radio button page clicking them in different order \"\"\" from pages.base_page import BasePage", "(By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1", "different order \"\"\" from pages.base_page import BasePage import time from selenium.webdriver.common.by import By", "time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3", "self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def 
click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 =", "time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3)", "= self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2", "btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 =", "self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def", "import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3", "btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1)", "btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self):", "self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 =", "time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3", "btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 =", "time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 =", "'/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2)", "= self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1", "'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input') def click_on_btn_3(self): btn_1 =", "click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_3.click() time.sleep(1) btn_2.click()", "By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input') RADIO_BTN_3 =", "btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2)", "btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click() def 
click_on_btn_2(self):", "= self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1", "clicking them in different order \"\"\" from pages.base_page import BasePage import time from", "\"\"\" from pages.base_page import BasePage import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage):", "selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1') RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input')", "BasePage import time from selenium.webdriver.common.by import By class RadioButtonPage(BasePage): RADIO_BTN_1 = (By.ID, 'radio-button-1')", "def click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1)", "\"\"\" Radio button page clicking them in different order \"\"\" from pages.base_page import", "btn_1.click() time.sleep(1) btn_3.click() time.sleep(1) btn_2.click() def click_on_btn_1(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2)", "page clicking them in different order \"\"\" from pages.base_page import BasePage import time", "order \"\"\" from pages.base_page import BasePage import time from selenium.webdriver.common.by import By class", "click_on_btn_2(self): btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_1.click() time.sleep(1) btn_3.click()", "btn_1 = self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1)", "= self.driver.find_element(*self.RADIO_BTN_1) btn_2 = self.driver.find_element(*self.RADIO_BTN_2) btn_3 = self.driver.find_element(*self.RADIO_BTN_3) btn_2.click() time.sleep(1) btn_1.click() time.sleep(1) btn_3.click()" ]
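# A hedged example of driving the page object above.  It assumes
# BasePage.__init__ stores the webdriver on self.driver (consistent with
# the self.driver.find_element calls), and the URL is a placeholder.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("http://example.com/radio-buttons")  # hypothetical page URL

page = RadioButtonPage(driver)
page.click_on_btn_3()   # clicks button 2, then 1, then 3, pausing 1 s between
driver.quit()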
[ "frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project where", "parent_doc.name and sle.voucher_type = %s and sle.project is null and parent_doc.project is not", "Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project", "'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project =", "import unicode_literals import frappe def execute(): for doctype in ['Sales Invoice', 'Delivery Note',", "Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project", "def execute(): for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update", "in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle,", "License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for", "Copyright (c) 2017, Frappe and Contributors # License: GNU General Public License v3.", "# License: GNU General Public License v3. See license.txt from __future__ import unicode_literals", "set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type = %s and", "Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and", "frappe def execute(): for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\"", "Frappe and Contributors # License: GNU General Public License v3. See license.txt from", "execute(): for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock", "and Contributors # License: GNU General Public License v3. See license.txt from __future__", "license.txt from __future__ import unicode_literals import frappe def execute(): for doctype in ['Sales", "sle, `tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type", "sle.project is null and parent_doc.project is not null and parent_doc.project != ''\"\"\".format(doctype), doctype)", "v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for doctype", "sle.voucher_type = %s and sle.project is null and parent_doc.project is not null and", "where sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is null and", "`tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type =", "`tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no =", "and sle.voucher_type = %s and sle.project is null and parent_doc.project is not null", "# Copyright (c) 2017, Frappe and Contributors # License: GNU General Public License", "Ledger Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name", "2017, Frappe and Contributors # License: GNU General Public License v3. See license.txt", "['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}`", "for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger", "See license.txt from __future__ import unicode_literals import frappe def execute(): for doctype in", "General Public License v3. 
See license.txt from __future__ import unicode_literals import frappe def", "update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set sle.project = parent_doc.project where sle.voucher_no", "parent_doc set sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type = %s", "= parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is", "%s and sle.project is null and parent_doc.project is not null and parent_doc.project !=", "__future__ import unicode_literals import frappe def execute(): for doctype in ['Sales Invoice', 'Delivery", "'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc set", "= parent_doc.name and sle.voucher_type = %s and sle.project is null and parent_doc.project is", "sle.project = parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project", "GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe", "Contributors # License: GNU General Public License v3. See license.txt from __future__ import", "from __future__ import unicode_literals import frappe def execute(): for doctype in ['Sales Invoice',", "doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry`", "(c) 2017, Frappe and Contributors # License: GNU General Public License v3. See", "unicode_literals import frappe def execute(): for doctype in ['Sales Invoice', 'Delivery Note', 'Stock", "License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import", "= %s and sle.project is null and parent_doc.project is not null and parent_doc.project", "and sle.project is null and parent_doc.project is not null and parent_doc.project != ''\"\"\".format(doctype),", "Invoice', 'Delivery Note', 'Stock Entry']: frappe.db.sql(\"\"\" update `tabStock Ledger Entry` sle, `tab{0}` parent_doc", "parent_doc.project where sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is null", "sle.voucher_no = parent_doc.name and sle.voucher_type = %s and sle.project is null and parent_doc.project", "import frappe def execute(): for doctype in ['Sales Invoice', 'Delivery Note', 'Stock Entry']:", "Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute():" ]
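# A quick illustration of the two substitution layers in the patch above,
# using only the standard library: str.format splices the doctype into the
# table name (identifiers cannot be bound as query parameters), while the
# %s placeholder is left for frappe.db.sql to bind the same doctype safely
# as the voucher_type value.
template = "update `tabStock Ledger Entry` sle, `tab{0}` parent_doc where sle.voucher_type = %s"
print(template.format("Stock Entry"))
# -> update `tabStock Ledger Entry` sle, `tabStock Entry` parent_doc where sle.voucher_type = %s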
[ "p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls", "plt p = np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls", "'y',ls = '-',linewidth = 3) plt.title(\"Two line in same plot\") plt.xlabel(\"X-Axis\") plt.ylabel(\"Y-Axis\") plt.show()", "= 'y',ls = '-',linewidth = 3) plt.title(\"Two line in same plot\") plt.xlabel(\"X-Axis\") plt.ylabel(\"Y-Axis\")", "'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two", "= 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3)", "np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth =", "np from matplotlib import pyplot as plt p = np.array([0,10]) p2 = p*2", "as plt p = np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color =", "as np from matplotlib import pyplot as plt p = np.array([0,10]) p2 =", "<reponame>abhayanigam/Learn_Python_Programming<filename>matplotlib/two_line_in_same_plot.py<gh_stars>1-10 import numpy as np from matplotlib import pyplot as plt p =", "= np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth", "= p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color", "= 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two line in same", "import numpy as np from matplotlib import pyplot as plt p = np.array([0,10])", "matplotlib import pyplot as plt p = np.array([0,10]) p2 = p*2 p3 =", "p = np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls =", "plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth =", "p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color =", "from matplotlib import pyplot as plt p = np.array([0,10]) p2 = p*2 p3", "= '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two line", "'-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two line in", "import pyplot as plt p = np.array([0,10]) p2 = p*2 p3 = p*3", "plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two line in same plot\") plt.xlabel(\"X-Axis\")", "2) plt.plot(p,p3,color = 'y',ls = '-',linewidth = 3) plt.title(\"Two line in same plot\")", "= p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls =", "pyplot as plt p = np.array([0,10]) p2 = p*2 p3 = p*3 plt.plot(p,p2,color", "p2 = p*2 p3 = p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2)", "p*3 plt.plot(p,p2,color = 'b',ls = '-.',linewidth = 2) plt.plot(p,p3,color = 'y',ls = '-',linewidth", "numpy as np from matplotlib import pyplot as plt p = np.array([0,10]) p2" ]
[ "\"\"\" import sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try:", "'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except", "'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except KeyError:", "not mach: platform = sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows return", "raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach == 'x86_64' and sys.maxint ==", "'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power", "# assume an Intel Windows return 'i386' # assume we have 'uname' mach", "processor but in 32-bits mode, maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586':", "try: import platform mach = platform.machine() except ImportError: pass if not mach: platform", "but in 32-bits mode, maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386',", "pass if not mach: platform = sys.platform.lower() if platform.startswith('win'): # assume an Intel", "sys.maxint == 2147483647: mach = 'x86' # it's a 64-bit processor but in", "assume we have 'uname' mach = os.popen('uname -m', 'r').read().strip() if not mach: raise", "'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError, \"unsupported processor '%s'\" % mach", "64-bit processor but in 32-bits mode, maybe try: return {'i386': 'i386', 'i486': 'i386',", "if mach == 'x86_64' and sys.maxint == 2147483647: mach = 'x86' # it's", "\"cannot run 'uname -m'\" if mach == 'x86_64' and sys.maxint == 2147483647: mach", "'x86' # it's a 64-bit processor but in 32-bits mode, maybe try: return", "autodetect(): mach = None try: import platform mach = platform.machine() except ImportError: pass", "except ImportError: pass if not mach: platform = sys.platform.lower() if platform.startswith('win'): # assume", "Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError, \"unsupported processor '%s'\" %", "None try: import platform mach = platform.machine() except ImportError: pass if not mach:", "have 'uname' mach = os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot", "== 'x86_64' and sys.maxint == 2147483647: mach = 'x86' # it's a 64-bit", "-m'\" if mach == 'x86_64' and sys.maxint == 2147483647: mach = 'x86' #", "'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', #", "sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows return 'i386' # assume we", "if not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach == 'x86_64'", "= None try: import platform mach = platform.machine() except ImportError: pass if not", "'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386',", "class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try: import platform mach =", "not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach == 'x86_64' and", "# assume we have 'uname' mach = os.popen('uname -m', 'r').read().strip() if not mach:", "mach = None try: import platform mach = platform.machine() except ImportError: pass if", "assume an Intel Windows return 'i386' # assume we have 'uname' mach =", "Windows return 'i386' # assume we have 'uname' mach = os.popen('uname -m', 'r').read().strip()", "'i386' # assume we have 'uname' mach = os.popen('uname -m', 'r').read().strip() if not", "if not mach: platform = 
sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows", "# Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError, \"unsupported processor '%s'\"", "platform = sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows return 'i386' #", "we have 'uname' mach = os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError,", "ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach == 'x86_64' and sys.maxint == 2147483647:", "if platform.startswith('win'): # assume an Intel Windows return 'i386' # assume we have", "mach = platform.machine() except ImportError: pass if not mach: platform = sys.platform.lower() if", "= sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows return 'i386' # assume", "-m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach", "'uname' mach = os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run", "mach: platform = sys.platform.lower() if platform.startswith('win'): # assume an Intel Windows return 'i386'", "'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError, \"unsupported", "'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple", "os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if", "an Intel Windows return 'i386' # assume we have 'uname' mach = os.popen('uname", "maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386',", "# it's a 64-bit processor but in 32-bits mode, maybe try: return {'i386':", "mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach == 'x86_64' and sys.maxint", "Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError,", "it's a 64-bit processor but in 32-bits mode, maybe try: return {'i386': 'i386',", "platform mach = platform.machine() except ImportError: pass if not mach: platform = sys.platform.lower()", "mach = 'x86' # it's a 64-bit processor but in 32-bits mode, maybe", "run 'uname -m'\" if mach == 'x86_64' and sys.maxint == 2147483647: mach =", "32-bits mode, maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386',", "'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\" if mach ==", "os class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try: import platform mach", "'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise ProcessorAutodetectError, \"unsupported processor", "Processor auto-detection \"\"\" import sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach =", "def autodetect(): mach = None try: import platform mach = platform.machine() except ImportError:", "in 32-bits mode, maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686':", "# Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach] except KeyError: raise", "ImportError: pass if not mach: platform = sys.platform.lower() if platform.startswith('win'): # assume an", "2147483647: mach = 'x86' # it's a 64-bit processor but in 32-bits mode,", "pass def autodetect(): mach = None try: import platform mach = platform.machine() except", "'uname -m'\" if mach == 'x86_64' and sys.maxint == 2147483647: mach = 'x86'", "return 
'i386' # assume we have 'uname' mach = os.popen('uname -m', 'r').read().strip() if", "a 64-bit processor but in 32-bits mode, maybe try: return {'i386': 'i386', 'i486':", "= os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run 'uname -m'\"", "<filename>pypy/jit/codegen/detect_cpu.py \"\"\" Processor auto-detection \"\"\" import sys, os class ProcessorAutodetectError(Exception): pass def autodetect():", "auto-detection \"\"\" import sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None", "platform.machine() except ImportError: pass if not mach: platform = sys.platform.lower() if platform.startswith('win'): #", "and sys.maxint == 2147483647: mach = 'x86' # it's a 64-bit processor but", "'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc',", "try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', #", "{'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86':", "mach == 'x86_64' and sys.maxint == 2147483647: mach = 'x86' # it's a", "mach = os.popen('uname -m', 'r').read().strip() if not mach: raise ProcessorAutodetectError, \"cannot run 'uname", "== 2147483647: mach = 'x86' # it's a 64-bit processor but in 32-bits", "= platform.machine() except ImportError: pass if not mach: platform = sys.platform.lower() if platform.startswith('win'):", "'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh': 'ppc', }[mach]", "platform.startswith('win'): # assume an Intel Windows return 'i386' # assume we have 'uname'", "ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try: import platform mach = platform.machine()", "'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel 'x86': 'i386', # Apple 'Power Macintosh':", "= 'x86' # it's a 64-bit processor but in 32-bits mode, maybe try:", "\"\"\" Processor auto-detection \"\"\" import sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach", "'x86_64' and sys.maxint == 2147483647: mach = 'x86' # it's a 64-bit processor", "return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc': 'i386', # Solaris/Intel", "import sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try: import", "mode, maybe try: return {'i386': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'i86pc':", "sys, os class ProcessorAutodetectError(Exception): pass def autodetect(): mach = None try: import platform", "import platform mach = platform.machine() except ImportError: pass if not mach: platform =", "Intel Windows return 'i386' # assume we have 'uname' mach = os.popen('uname -m'," ]
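# The file above predates Python 3: the `raise Class, "msg"` statements and
# sys.maxint exist only in Python 2.  A hedged sketch of the same
# 32-bit-interpreter check in modern Python (sys.maxsize replaces the
# removed sys.maxint):
import sys
import platform

def autodetect_py3():
    mach = platform.machine() or 'i386'
    # A 64-bit CPU running a 32-bit interpreter still reports x86_64,
    # but sys.maxsize is 2**31 - 1 there, mirroring the sys.maxint test.
    if mach == 'x86_64' and sys.maxsize == 2147483647:
        mach = 'x86'
    return mach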
[ "7 class Style_Strikethrough(Style): def __init__(self): super() self.code = 9 class Colour(Style): def __init__(self):", "= 4 class Style_Invert(Style): def __init__(self): super() self.code = 7 class Style_Strikethrough(Style): def", "__init__(self): super() self.code = 32 class Colour_DarkYellow(Colour): def __init__(self): super() self.code = 33", "= 0 def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def __init__(self): super() self.code", "def __init__(self): super() def _render(self, isForeground = True): if isForeground: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code)", "<filename>vtk/styles.py import vtk.term as term class Style: def __init__(self): self.code = 0 def", "def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def __init__(self): super() self.code = 0", "10) class Colour_Transparent(Colour): def _render(self, isForeground = True): return \"\" class Colour_Black(Colour): def", "0 def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def __init__(self): super() self.code =", "class Colour_Black(Colour): def __init__(self): super() self.code = 30 class Colour_DarkRed(Colour): def __init__(self): super()", "class Colour_Grey(Colour): def __init__(self): super() self.code = 37 class Colour_DarkGrey(Colour): def __init__(self): super()", "class Colour_Blue(Colour): def __init__(self): super() self.code = 94 class Colour_Magenta(Colour): def __init__(self): super()", "= 35 class Colour_DarkCyan(Colour): def __init__(self): super() self.code = 36 class Colour_Grey(Colour): def", "isForeground = True): if isForeground: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) else: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code +", "return \"\" class Colour_Black(Colour): def __init__(self): super() self.code = 30 class Colour_DarkRed(Colour): def", "super() self.code = 34 class Colour_DarkMagenta(Colour): def __init__(self): super() self.code = 35 class", "9 class Colour(Style): def __init__(self): super() def _render(self, isForeground = True): if isForeground:", "Colour_DarkGrey(Colour): def __init__(self): super() self.code = 90 class Colour_Red(Colour): def __init__(self): super() self.code", "self.code = 91 class Colour_Green(Colour): def __init__(self): super() self.code = 92 class Colour_Yellow(Colour):", "Style_Bold(Style): def __init__(self): super() self.code = 1 class Style_Faint(Style): def __init__(self): super() self.code", "def __init__(self): super() self.code = 36 class Colour_Grey(Colour): def __init__(self): super() self.code =", "super() self.code = 36 class Colour_Grey(Colour): def __init__(self): super() self.code = 37 class", "super() self.code = 35 class Colour_DarkCyan(Colour): def __init__(self): super() self.code = 36 class", "Style_Faint(Style): def __init__(self): super() self.code = 2 class Style_Italic(Style): def __init__(self): super() self.code", "import vtk.term as term class Style: def __init__(self): self.code = 0 def _render(self):", "super() self.code = 91 class Colour_Green(Colour): def __init__(self): super() self.code = 92 class", "self.code = 34 class Colour_DarkMagenta(Colour): def __init__(self): super() self.code = 35 class Colour_DarkCyan(Colour):", "36 class Colour_Grey(Colour): def __init__(self): super() self.code = 37 class Colour_DarkGrey(Colour): def __init__(self):", "= 91 class Colour_Green(Colour): def __init__(self): super() self.code = 92 
class Colour_Yellow(Colour): def", "super() self.code = 0 class Style_Bold(Style): def __init__(self): super() self.code = 1 class", "def __init__(self): super() self.code = 3 class Style_Underline(Style): def __init__(self): super() self.code =", "Colour_DarkMagenta(Colour): def __init__(self): super() self.code = 35 class Colour_DarkCyan(Colour): def __init__(self): super() self.code", "= 2 class Style_Italic(Style): def __init__(self): super() self.code = 3 class Style_Underline(Style): def", "def __init__(self): super() self.code = 1 class Style_Faint(Style): def __init__(self): super() self.code =", "= 7 class Style_Strikethrough(Style): def __init__(self): super() self.code = 9 class Colour(Style): def", "Colour_DarkYellow(Colour): def __init__(self): super() self.code = 33 class Colour_DarkBlue(Colour): def __init__(self): super() self.code", "def __init__(self): super() self.code = 34 class Colour_DarkMagenta(Colour): def __init__(self): super() self.code =", "self.code = 2 class Style_Italic(Style): def __init__(self): super() self.code = 3 class Style_Underline(Style):", "True): if isForeground: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) else: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code + 10) class", "def _render(self, isForeground = True): if isForeground: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) else: return \"{}[{}m\".format(term.ESCAPE_CHARACTER,", "self.code = 4 class Style_Invert(Style): def __init__(self): super() self.code = 7 class Style_Strikethrough(Style):", "= 36 class Colour_Grey(Colour): def __init__(self): super() self.code = 37 class Colour_DarkGrey(Colour): def", "Colour_DarkGreen(Colour): def __init__(self): super() self.code = 32 class Colour_DarkYellow(Colour): def __init__(self): super() self.code", "super() self.code = 95 class Colour_Cyan(Colour): def __init__(self): super() self.code = 96 class", "= 31 class Colour_DarkGreen(Colour): def __init__(self): super() self.code = 32 class Colour_DarkYellow(Colour): def", "Style_Strikethrough(Style): def __init__(self): super() self.code = 9 class Colour(Style): def __init__(self): super() def", "__init__(self): super() self.code = 7 class Style_Strikethrough(Style): def __init__(self): super() self.code = 9", "self.code = 92 class Colour_Yellow(Colour): def __init__(self): super() self.code = 93 class Colour_Blue(Colour):", "\"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code + 10) class Colour_Transparent(Colour): def _render(self, isForeground = True): return \"\"", "class Style_Invert(Style): def __init__(self): super() self.code = 7 class Style_Strikethrough(Style): def __init__(self): super()", "super() self.code = 1 class Style_Faint(Style): def __init__(self): super() self.code = 2 class", "__init__(self): super() self.code = 4 class Style_Invert(Style): def __init__(self): super() self.code = 7", "super() self.code = 30 class Colour_DarkRed(Colour): def __init__(self): super() self.code = 31 class", "return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code + 10) class Colour_Transparent(Colour): def _render(self, isForeground = True): return", "class Style_Strikethrough(Style): def __init__(self): super() self.code = 9 class Colour(Style): def __init__(self): super()", "Colour_Grey(Colour): def __init__(self): super() self.code = 37 class Colour_DarkGrey(Colour): def __init__(self): super() self.code", "class Style_Underline(Style): def __init__(self): super() self.code = 4 class Style_Invert(Style): def __init__(self): super()", "Colour_DarkBlue(Colour): 
def __init__(self): super() self.code = 34 class Colour_DarkMagenta(Colour): def __init__(self): super() self.code", "self.code = 36 class Colour_Grey(Colour): def __init__(self): super() self.code = 37 class Colour_DarkGrey(Colour):", "self.code + 10) class Colour_Transparent(Colour): def _render(self, isForeground = True): return \"\" class", "__init__(self): super() self.code = 35 class Colour_DarkCyan(Colour): def __init__(self): super() self.code = 36", "4 class Style_Invert(Style): def __init__(self): super() self.code = 7 class Style_Strikethrough(Style): def __init__(self):", "super() self.code = 92 class Colour_Yellow(Colour): def __init__(self): super() self.code = 93 class", "__init__(self): super() self.code = 36 class Colour_Grey(Colour): def __init__(self): super() self.code = 37", "= True): if isForeground: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) else: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code + 10)", "self.code = 32 class Colour_DarkYellow(Colour): def __init__(self): super() self.code = 33 class Colour_DarkBlue(Colour):", "def __init__(self): super() self.code = 35 class Colour_DarkCyan(Colour): def __init__(self): super() self.code =", "super() self.code = 93 class Colour_Blue(Colour): def __init__(self): super() self.code = 94 class", "def __init__(self): super() self.code = 31 class Colour_DarkGreen(Colour): def __init__(self): super() self.code =", "Colour_Cyan(Colour): def __init__(self): super() self.code = 96 class Colour_White(Colour): def __init__(self): super() self.code", "__init__(self): super() self.code = 96 class Colour_White(Colour): def __init__(self): super() self.code = 97", "self.code = 0 def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def __init__(self): super()", "Style_Underline(Style): def __init__(self): super() self.code = 4 class Style_Invert(Style): def __init__(self): super() self.code", "self.code = 93 class Colour_Blue(Colour): def __init__(self): super() self.code = 94 class Colour_Magenta(Colour):", "class Colour_Red(Colour): def __init__(self): super() self.code = 91 class Colour_Green(Colour): def __init__(self): super()", "def __init__(self): super() self.code = 92 class Colour_Yellow(Colour): def __init__(self): super() self.code =", "vtk.term as term class Style: def __init__(self): self.code = 0 def _render(self): return", "super() self.code = 4 class Style_Invert(Style): def __init__(self): super() self.code = 7 class", "= 94 class Colour_Magenta(Colour): def __init__(self): super() self.code = 95 class Colour_Cyan(Colour): def", "95 class Colour_Cyan(Colour): def __init__(self): super() self.code = 96 class Colour_White(Colour): def __init__(self):", "def __init__(self): super() self.code = 30 class Colour_DarkRed(Colour): def __init__(self): super() self.code =", "= 32 class Colour_DarkYellow(Colour): def __init__(self): super() self.code = 33 class Colour_DarkBlue(Colour): def", "3 class Style_Underline(Style): def __init__(self): super() self.code = 4 class Style_Invert(Style): def __init__(self):", "self.code = 37 class Colour_DarkGrey(Colour): def __init__(self): super() self.code = 90 class Colour_Red(Colour):", "Style_Reset(Style): def __init__(self): super() self.code = 0 class Style_Bold(Style): def __init__(self): super() self.code", "class Colour_DarkCyan(Colour): def __init__(self): super() self.code = 36 class Colour_Grey(Colour): def __init__(self): super()", "= 95 class Colour_Cyan(Colour): def __init__(self): super() self.code 
= 96 class Colour_White(Colour): def", "__init__(self): super() self.code = 0 class Style_Bold(Style): def __init__(self): super() self.code = 1", "__init__(self): super() self.code = 3 class Style_Underline(Style): def __init__(self): super() self.code = 4", "def __init__(self): super() self.code = 32 class Colour_DarkYellow(Colour): def __init__(self): super() self.code =", "+ 10) class Colour_Transparent(Colour): def _render(self, isForeground = True): return \"\" class Colour_Black(Colour):", "term class Style: def __init__(self): self.code = 0 def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code)", "Style_Italic(Style): def __init__(self): super() self.code = 3 class Style_Underline(Style): def __init__(self): super() self.code", "2 class Style_Italic(Style): def __init__(self): super() self.code = 3 class Style_Underline(Style): def __init__(self):", "def _render(self, isForeground = True): return \"\" class Colour_Black(Colour): def __init__(self): super() self.code", "self.code) else: return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code + 10) class Colour_Transparent(Colour): def _render(self, isForeground =", "super() self.code = 9 class Colour(Style): def __init__(self): super() def _render(self, isForeground =", "self.code) class Style_Reset(Style): def __init__(self): super() self.code = 0 class Style_Bold(Style): def __init__(self):", "class Colour_Transparent(Colour): def _render(self, isForeground = True): return \"\" class Colour_Black(Colour): def __init__(self):", "super() self.code = 2 class Style_Italic(Style): def __init__(self): super() self.code = 3 class", "class Colour_DarkYellow(Colour): def __init__(self): super() self.code = 33 class Colour_DarkBlue(Colour): def __init__(self): super()", "self.code = 33 class Colour_DarkBlue(Colour): def __init__(self): super() self.code = 34 class Colour_DarkMagenta(Colour):", "= 93 class Colour_Blue(Colour): def __init__(self): super() self.code = 94 class Colour_Magenta(Colour): def", "super() self.code = 3 class Style_Underline(Style): def __init__(self): super() self.code = 4 class", "class Colour_DarkGrey(Colour): def __init__(self): super() self.code = 90 class Colour_Red(Colour): def __init__(self): super()", "class Colour_Magenta(Colour): def __init__(self): super() self.code = 95 class Colour_Cyan(Colour): def __init__(self): super()", "def __init__(self): self.code = 0 def _render(self): return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def", "class Style_Reset(Style): def __init__(self): super() self.code = 0 class Style_Bold(Style): def __init__(self): super()", "def __init__(self): super() self.code = 96 class Colour_White(Colour): def __init__(self): super() self.code =", "isForeground = True): return \"\" class Colour_Black(Colour): def __init__(self): super() self.code = 30", "class Style_Italic(Style): def __init__(self): super() self.code = 3 class Style_Underline(Style): def __init__(self): super()", "__init__(self): super() self.code = 93 class Colour_Blue(Colour): def __init__(self): super() self.code = 94", "return \"{}[{}m\".format(term.ESCAPE_CHARACTER, self.code) class Style_Reset(Style): def __init__(self): super() self.code = 0 class Style_Bold(Style):", "def __init__(self): super() self.code = 7 class Style_Strikethrough(Style): def __init__(self): super() self.code =", "92 class Colour_Yellow(Colour): def __init__(self): super() self.code = 93 class Colour_Blue(Colour): def __init__(self):", "_render(self, isForeground = True): if 
# A small ANSI terminal styling library: each Style subclass wraps one SGR
# code, and Colour subclasses render either as a foreground colour (code) or
# a background colour (code + 10). The original import line is truncated in
# the source; it aliases a terminal-constants module (which must provide
# ESCAPE_CHARACTER, the ESC byte) as `term`.
import terminal as term  # module name assumed; only the alias `term` survives


class Style:
    def __init__(self):
        self.code = 0

    def _render(self):
        return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code)


class Style_Reset(Style):
    def __init__(self):
        super().__init__()
        self.code = 0


class Style_Bold(Style):
    def __init__(self):
        super().__init__()
        self.code = 1


class Style_Faint(Style):
    def __init__(self):
        super().__init__()
        self.code = 2


class Style_Italic(Style):
    def __init__(self):
        super().__init__()
        self.code = 3


class Style_Underline(Style):
    def __init__(self):
        super().__init__()
        self.code = 4


class Style_Invert(Style):
    def __init__(self):
        super().__init__()
        self.code = 7


class Style_Strikethrough(Style):
    def __init__(self):
        super().__init__()
        self.code = 9


class Colour(Style):
    def __init__(self):
        super().__init__()

    def _render(self, isForeground=True):
        if isForeground:
            return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code)
        else:
            # Background SGR codes are the foreground codes offset by 10
            return "{}[{}m".format(term.ESCAPE_CHARACTER, self.code + 10)


class Colour_Transparent(Colour):
    def _render(self, isForeground=True):
        return ""


class Colour_Black(Colour):
    def __init__(self):
        super().__init__()
        self.code = 30


class Colour_DarkRed(Colour):
    def __init__(self):
        super().__init__()
        self.code = 31


class Colour_DarkGreen(Colour):
    def __init__(self):
        super().__init__()
        self.code = 32


class Colour_DarkYellow(Colour):
    def __init__(self):
        super().__init__()
        self.code = 33


class Colour_DarkBlue(Colour):
    def __init__(self):
        super().__init__()
        self.code = 34


class Colour_DarkMagenta(Colour):
    def __init__(self):
        super().__init__()
        self.code = 35


class Colour_DarkCyan(Colour):
    def __init__(self):
        super().__init__()
        self.code = 36


class Colour_Grey(Colour):
    def __init__(self):
        super().__init__()
        self.code = 37


class Colour_DarkGrey(Colour):
    def __init__(self):
        super().__init__()
        self.code = 90


class Colour_Red(Colour):
    def __init__(self):
        super().__init__()
        self.code = 91


class Colour_Green(Colour):
    def __init__(self):
        super().__init__()
        self.code = 92


class Colour_Yellow(Colour):
    def __init__(self):
        super().__init__()
        self.code = 93


class Colour_Blue(Colour):
    def __init__(self):
        super().__init__()
        self.code = 94


class Colour_Magenta(Colour):
    def __init__(self):
        super().__init__()
        self.code = 95


class Colour_Cyan(Colour):
    def __init__(self):
        super().__init__()
        self.code = 96


class Colour_White(Colour):
    def __init__(self):
        super().__init__()
        self.code = 97  # value truncated in the source; 97 is the ANSI bright-white foreground code
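# A minimal usage sketch (not part of the library above): it assumes
# term.ESCAPE_CHARACTER is the ANSI escape character "\x1b", so each
# _render() call yields an SGR sequence such as "\x1b[1m" (bold) or
# "\x1b[91m" (bright-red foreground); backgrounds reuse the same code + 10.
if __name__ == '__main__':
    styled = "{}{}{}warning{}".format(
        Style_Bold()._render(),
        Colour_Red()._render(isForeground=True),
        Colour_Black()._render(isForeground=False),
        Style_Reset()._render(),
    )
    print(styled)  # bold bright-red text on a black background, then reset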
[ "# RefURL: https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import pytest def pytest_addoption(parser):", "options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" ) @pytest.fixture def", "def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel", "import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\",", "# # # import # import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\"", "import # import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption(", "parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" ) @pytest.fixture def cli_conf(request): return request.config.getoption('--change-conf')", "# # # # import # import pytest def pytest_addoption(parser): \"\"\"Add pytest command", "# import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\",", "# # RefURL: https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import pytest def", "command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" ) @pytest.fixture", "https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import pytest def pytest_addoption(parser): \"\"\"Add pytest", "<filename>src/conftest.py # # # RefURL: https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import", "https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" ) @pytest.fixture def cli_conf(request): return", "pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting", "\"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\"", "RefURL: https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import pytest def pytest_addoption(parser): \"\"\"Add", "pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" )", "# # # RefURL: https://docs.pytest.org/en/latest/example/parametrize.html # # # # import # import pytest", "# import # import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: 
https://docs.python.org/3/library/argparse.html#the-add-argument-method", "# # import # import pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL:", "pytest def pytest_addoption(parser): \"\"\"Add pytest command options.\"\"\" #RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\",", "#RefURL: https://docs.python.org/3/library/argparse.html#the-add-argument-method parser.addoption( \"--change-conf\", action=\"store\", default=\"ftpdel.conf\", help=\"ftpdel setting file\" ) @pytest.fixture def cli_conf(request):" ]
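# A hypothetical companion test (not in the source) showing how the fixture
# above is consumed: pytest injects cli_conf, which resolves to the default
# "ftpdel.conf" unless overridden on the command line, e.g.
#     pytest --change-conf=custom.conf
#
# src/test_cli_conf.py (hypothetical file name)
def test_cli_conf_points_to_a_conf_file(cli_conf):
    assert cli_conf.endswith('.conf')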
[ "'error in datetime string: %s' % e, 'value': val, 'row': idx, 'column': series.name,", "= validator_lookup[colspec['type']] errors.extend(v(df[colname], colspec, sheet)) return errors def validate_sheet_metadata(wb, spec): errors = []", "'data', 'sheet': 'data' })) return errors df = wb['data'] errors.extend(validate_sheet_generic(df, 'data', spec)) #", "open import arrow import os import oyaml as yaml import pandas as pd", "fn })) else: try: dt = arrow.get(m.group('date'), spec['file_date']) except ValueError as e: errors.append(error({", "errors = [] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message': 'the", "a valid value. Remove empty cells before further checks series = series[series.str.len() >", "min_errors.iteritems(): errors.append(error({ 'message': 'value less than minimum of {}'.format(colspec['min']), 'value': val, 'row': idx,", "match %s' % colspec['format'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet }))", "empty cells before further checks series = series[series.str.len() > 0] elif str(na) ==", "are not # valid values. Flag as errors. empty_errors = series[series.str.len() == 0]", "arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in filename must be in %s format'", "})) if not re.match(r'^v.+$', m.group('version')): errors.append(error({ 'message': 'version string in filename must start", "len(validate_sheet_vars(wb, spec)) > 0: return errors # Now check custom data columns required_columns", "min_errors = None max_errors = None if colspec.get('min', None) is not None: min_errors", "in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'dataset_meta_data', 'sheet': 'dataset_meta_data' }))", "errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now look", "os import oyaml as yaml import pandas as pd import re import sys", "columns in 'data' sheet match data columns defined in 'vars' sheet. 
vars_defined =", "'row': idx, 'column': series.name, 'sheet': sheet })) return errors def validate_filename(input_path, spec): fn", "'message': '\"%s\" worksheet is missing' % 'data', 'sheet': 'data' })) return errors df", "except arrow.parser.ParserError as e: errors.append(error({ 'message': 'invalid datetime string - should match %s'", "from .error import error from io import open import arrow import os import", "spec)) return errors def validate_column_datetimes(series, colspec, sheet): errors = [] empty_errors, series =", "dt = arrow.get(val, colspec['format']) except ValueError as e: errors.append(error({ 'message': 'error in datetime", "Convert to floats converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN", "in filename datetime string: %s' % e, 'value': m.group('date') })) except arrow.parser.ParserError as", "e: errors.append(error({ 'message': 'date in filename must be in %s format' % spec['file_date'],", "vars_missing_value): if var not in extra_defined: sheet = 'vars_meta_data' colspec = { 'required':", "sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Convert", "worksheet but were not defined in the \"%s\" worksheet' % ('data', 'vars_meta_data'), 'value':", "errors = [] filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m = filename_re.match(fn) if not m: errors.append(error({", "= [] required = colspec.get('required', None) na = colspec.get('na', None) if not required:", "colspec, sheet): errors = [] required = colspec.get('required', None) na = colspec.get('na', None)", "in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'vars_meta_data', 'sheet': 'vars_meta_data' }))", "colspec['format']) except ValueError as e: errors.append(error({ 'message': 'error in datetime string: %s' %", "'sheet': 'dataset_meta_data' })) return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors", "dtype=str) else: wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors = [] errors.extend(validate_filename(input_path,", "% spec['file_date'], 'value': m.group('date') })) if not re.match(r'^v.+$', m.group('version')): errors.append(error({ 'message': 'version string", "errors = [] if not 'data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is", "defined in 'vars_meta_data' # First make sure that 'vars_meta_data' doesn't have any errors,", "if not 'dataset_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'dataset_meta_data',", "errors def validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec,", "errors.append(error({ 'message': 'invalid value', 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet }))", ".error import error from io import open import arrow import os import oyaml", "sheet })) return errors # Validate cells for colname, colspec in spec['columns'][sheet].items(): v", "sheet, required_columns), 'value': str(df.columns.tolist()), 'sheet': sheet })) return errors # Validate cells for", "'.join(extra_defined) })) if extra_found: errors.append(error({ 'message': 'some data variables were found in the", "None) na = colspec.get('na', None) if not required: # Empty cell is a", "= [] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != 
required_columns: errors.append(error({ 'message': 'the first", "sheet })) # Check range min_errors = None max_errors = None if colspec.get('min',", "values. Flag as errors. empty_errors = series[series.str.len() == 0] for idx, val in", "'vars_meta_data' })) return errors df = wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors #", "= 'vars_meta_data' colspec = { 'required': True, 'na': na } empty_errors, _ =", "errors.append(error({ 'message': 'string length > %d' % colspec['max'], 'value': val, 'row': idx, 'column':", "errors def validate_sheet_vars(wb, spec=spec): errors = [] if not 'vars_meta_data' in wb: errors.append(error({", "e: errors.append(error({ 'message': 'error in filename datetime string: %s' % e, 'value': m.group('date')", "colspec['max']] for idx, val in max_errors.iteritems(): errors.append(error({ 'message': 'value greater than maximum of", "= wb['vars_meta_data']['var_short_name'].tolist() vars_found = df_data.columns.tolist() extra_defined = set(vars_defined).difference(set(vars_found)) extra_found = set(vars_found).difference(set(vars_defined)) if extra_defined:", "worksheet is missing' % 'dataset_meta_data', 'sheet': 'dataset_meta_data' })) return errors df = wb['dataset_meta_data']", "{}'.format(colspec['max']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors def", "errors.append(error({ 'message': 'some data variables were defined in the \"%s\" worksheet but were", "None) is not None: max_errors = series[converted > colspec['max']] for idx, val in", "return errors # Register column validators in lookup validator_lookup = { 'float': validate_column_floats,", "spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path, encoding='utf-8') as fh: spec = yaml.load(fh) def", "sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors = [] errors.extend(validate_filename(input_path, spec)) errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb, spec))", "else: wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors = [] errors.extend(validate_filename(input_path, spec))", "except arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in filename must be in %s", "None if colspec.get('min', None) is not None: min_errors = series[converted < colspec['min']] for", "value. Remove empty cells before further checks series = series[series.str.len() > 0] elif", "# valid values. Flag as errors. 
empty_errors = series[series.str.len() == 0] for idx,", "m: errors.append(error({ 'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value': fn })) else:", "the \"%s\" worksheet should be %s' % (len(required_columns), sheet, required_columns), 'value': str(df.columns.tolist()), 'sheet':", "as e: errors.append(error({ 'message': 'error in datetime string: %s' % e, 'value': val,", "% (len(required_columns), sheet, required_columns), 'value': str(df.columns.tolist()), 'sheet': sheet })) return errors # Validate", "_ = validate_column_generic(df_data[var], colspec, 'data') errors.extend(empty_errors) return errors def validate_sheet_generic(df, sheet, spec): errors", "filename datetime string: %s' % e, 'value': m.group('date') })) except arrow.parser.ParserError as e:", "'column': series.name, 'sheet': sheet })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'invalid datetime", "return errors def validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series,", "= [] if not 'dataset_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing'", "Load dataset file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path,", "further checks series = series[series.str.len() > 0] else: # NA is None or", "validate_column_datetimes(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors)", "match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value': fn })) else: try: dt = arrow.get(m.group('date'), spec['file_date']) except", "floats converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN # Flag", "m.group('date') })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in filename must be", "fn = os.path.basename(input_path) errors = [] filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m = filename_re.match(fn) if", "any more checks here if len(validate_sheet_vars(wb, spec)) > 0: return errors # Now", "import open import arrow import os import oyaml as yaml import pandas as", "colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) #", "found in the \"%s\" worksheet but were not defined in the \"%s\" worksheet'", "required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message': 'the first %d columns", "validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors)", "maximum of {}'.format(colspec['max']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return", "[] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now look for format", "> 0] else: # NA is None or is not the empty string,", "'value': fn })) return errors def validate_sheet_data(wb, spec): errors = [] if not", "for idx, val in max_errors.iteritems(): errors.append(error({ 'message': 'value greater than maximum of {}'.format(colspec['max']),", "remove empty cells series = series[series.str.len() > 0] if na is not None:", "0] else: # NA is None or is not the 
empty string, therefore", "Flag NaN as errors nonnumeric_errors = series[pd.isna(converted)] for idx, val in nonnumeric_errors.iteritems(): errors.append(error({", "errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors def validate_sheet_vars(wb, spec=spec): errors = [] if not", "(sys.version_info > (3, 0)): wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=str) else: wb", "na] return (errors, series) def validate_column_strings(series, colspec, sheet): errors = [] empty_errors, series", "is missing' % 'dataset_meta_data', 'sheet': 'dataset_meta_data' })) return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df,", "not the empty string, therefore empty cells are not # valid values. Flag", "NA is None or is not the empty string, therefore empty cells are", "empty cells series = series[series.str.len() > 0] if na is not None: #", "present = series[series.str.len() > 0] for idx, val in present.iteritems(): try: dt =", "that # data columns in 'data' sheet match data columns defined in 'vars'", "= [] errors.extend(validate_filename(input_path, spec)) errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb, spec)) errors.extend(validate_sheet_data(wb, spec)) return errors def", "value. Remove empty cells before further checks series = series[series.str.len() > 0] else:", "'row': idx, 'column': series.name, 'sheet': sheet })) return errors def validate_column_floats(series, colspec, sheet):", "if (sys.version_info > (3, 0)): wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=str) else:", "with any more checks here if len(validate_sheet_vars(wb, spec)) > 0: return errors #", "val in nonnumeric_errors.iteritems(): errors.append(error({ 'message': 'invalid value', 'value': val, 'row': idx, 'column': series.name,", "})) return errors def validate_sheet_data(wb, spec): errors = [] if not 'data' in", "list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message': 'the first %d columns of the", "cells before further checks series = series[series.str.len() > 0] else: # NA is", "series[series.str.len() > colspec['max']] for idx, val in maxlen_errors.iteritems(): errors.append(error({ 'message': 'string length >", "not 'dataset_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'dataset_meta_data', 'sheet':", "# Now validate the actual data only on the condition of # proper", "0] if na is not None: # Remove NA values before further checks", "'value': fn })) else: try: dt = arrow.get(m.group('date'), spec['file_date']) except ValueError as e:", "%s' % colspec['format'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return", "'version string in filename must start with \"v\"', 'value': fn })) return errors", "were found in the \"%s\" worksheet but were not defined in the \"%s\"", "spec)) errors.extend(validate_sheet_vars(wb, spec)) errors.extend(validate_sheet_data(wb, spec)) return errors def validate_column_datetimes(series, colspec, sheet): errors =", "', '.join(extra_defined) })) if extra_found: errors.append(error({ 'message': 'some data variables were found in", "missing values. # TODO: Is there any type-checking expected in custom vars? 
vars_missing_value", "== 0] for idx, val in empty_errors.iteritems(): errors.append(error({ 'message': 'missing required field', 'row':", "na is not None: # Remove NA values before further checks series =", "series) def validate_column_strings(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec,", "valid value. Remove empty cells before further checks series = series[series.str.len() > 0]", "# Validate cells for colname, colspec in spec['columns'][sheet].items(): v = validator_lookup[colspec['type']] errors.extend(v(df[colname], colspec,", "'data') errors.extend(empty_errors) return errors def validate_sheet_generic(df, sheet, spec): errors = [] required_columns =", "na in zip(vars_defined, vars_missing_value): if var not in extra_defined: sheet = 'vars_meta_data' colspec", "here if len(validate_sheet_vars(wb, spec)) > 0: return errors # Now check custom data", "validate_filename(input_path, spec): fn = os.path.basename(input_path) errors = [] filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m =", "# Register column validators in lookup validator_lookup = { 'float': validate_column_floats, 'string': validate_column_strings,", "max_errors = None if colspec.get('min', None) is not None: min_errors = series[converted <", "'value': str(df.columns.tolist()), 'sheet': sheet })) return errors # Validate cells for colname, colspec", "sheet and check that # data columns in 'data' sheet match data columns", "else: try: dt = arrow.get(m.group('date'), spec['file_date']) except ValueError as e: errors.append(error({ 'message': 'error", "'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors def validate_filename(input_path,", "idx, val in empty_errors.iteritems(): errors.append(error({ 'message': 'missing required field', 'row': idx, 'column': series.name,", "na_values=[], keep_default_na=False, dtype=str) else: wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors =", "than minimum of {}'.format(colspec['min']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet }))", "not None: # Remove NA values before further checks series = series[series !=", "sheet)) return errors def validate_sheet_metadata(wb, spec): errors = [] if not 'dataset_meta_data' in", "% ('vars_meta_data', 'data'), 'value': ', '.join(extra_defined) })) if extra_found: errors.append(error({ 'message': 'some data", "\"%s\" worksheet but were not defined in the \"%s\" worksheet' % ('data', 'vars_meta_data'),", "df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message': 'the first %d columns of the \"%s\" worksheet", "errors.extend(validate_filename(input_path, spec)) errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb, spec)) errors.extend(validate_sheet_data(wb, spec)) return errors def validate_column_datetimes(series, colspec,", "= os.path.basename(input_path) errors = [] filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m = filename_re.match(fn) if not", "to floats converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN #", "found in the \"%s\" worksheet' % ('vars_meta_data', 'data'), 'value': ', '.join(extra_defined) })) if", "Remove NA values before further checks series = series[series != na] return (errors,", "oyaml as yaml import pandas as pd import re import sys # Load", "colspec['min']] for idx, val in 
min_errors.iteritems(): errors.append(error({ 'message': 'value less than minimum of", "First make sure that 'vars_meta_data' doesn't have any errors, if it does #", "that 'vars_meta_data' doesn't have any errors, if it does # don't bother with", "present.iteritems(): try: dt = arrow.get(val, colspec['format']) except ValueError as e: errors.append(error({ 'message': 'error", "os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path, encoding='utf-8') as fh: spec = yaml.load(fh) def validate(input_path): if", "of {}'.format(colspec['max']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors", "= df_data.columns.tolist() extra_defined = set(vars_defined).difference(set(vars_found)) extra_found = set(vars_found).difference(set(vars_defined)) if extra_defined: errors.append(error({ 'message': 'some", "'value greater than maximum of {}'.format(colspec['max']), 'value': val, 'row': idx, 'column': series.name, 'sheet':", "errors.append(error({ 'message': 'error in datetime string: %s' % e, 'value': val, 'row': idx,", "} empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data') errors.extend(empty_errors) return errors def validate_sheet_generic(df, sheet,", "= series[series != na] return (errors, series) def validate_column_strings(series, colspec, sheet): errors =", "val in min_errors.iteritems(): errors.append(error({ 'message': 'value less than minimum of {}'.format(colspec['min']), 'value': val,", "that were defined in 'vars_meta_data' # First make sure that 'vars_meta_data' doesn't have", "idx, 'column': series.name, 'sheet': sheet })) # Now remove empty cells series =", "if not required: # Empty cell is a valid value. Remove empty cells", "errors def validate_sheet_generic(df, sheet, spec): errors = [] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)]", "def validate_column_strings(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet)", "return errors # Now check custom data columns required_columns = list(spec['columns']['data'].keys()) df_data =", "e, 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) except arrow.parser.ParserError as", "the \"%s\" worksheet' % ('data', 'vars_meta_data'), 'value': ', '.join(extra_found) })) # Now validate", "'vars_meta_data' doesn't have any errors, if it does # don't bother with any", "for idx, val in empty_errors.iteritems(): errors.append(error({ 'message': 'missing required field', 'row': idx, 'column':", "validate(input_path): if (sys.version_info > (3, 0)): wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=str)", "errors df = wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors # Register column validators", "idx, val in max_errors.iteritems(): errors.append(error({ 'message': 'value greater than maximum of {}'.format(colspec['max']), 'value':", "> %d' % colspec['max'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet }))", "import os import oyaml as yaml import pandas as pd import re import", "val in empty_errors.iteritems(): errors.append(error({ 'message': 'missing required field', 'row': idx, 'column': series.name, 'sheet':", "variables were found in the \"%s\" worksheet but were not defined in the", "minimum of {}'.format(colspec['min']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) if", "colspec.get('na', None) if not required: 
# Empty cell is a valid value. Remove", "return errors def validate_filename(input_path, spec): fn = os.path.basename(input_path) errors = [] filename_re =", "df_data.columns.tolist() extra_defined = set(vars_defined).difference(set(vars_found)) extra_found = set(vars_found).difference(set(vars_defined)) if extra_defined: errors.append(error({ 'message': 'some data", "datetime string: %s' % e, 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet", "None: # Remove NA values before further checks series = series[series != na]", "series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not None: maxlen_errors", "'message': 'value less than minimum of {}'.format(colspec['min']), 'value': val, 'row': idx, 'column': series.name,", "return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors def validate_sheet_vars(wb, spec=spec):", "sheet_name=None, na_values=[], keep_default_na=False, dtype=str) else: wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors", "NA values before further checks series = series[series != na] return (errors, series)", "string: %s' % e, 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet }))", "series.name, 'sheet': sheet })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'invalid datetime string", "# Collect variable short names from vars_meta_data sheet and check that # data", "colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if", "is None or is not the empty string, therefore empty cells are not", "errors.append(error({ 'message': 'value less than minimum of {}'.format(colspec['min']), 'value': val, 'row': idx, 'column':", "empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not None:", "% e, 'value': m.group('date') })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in", "= series[series.str.len() > 0] for idx, val in present.iteritems(): try: dt = arrow.get(val,", "spec['file_date']) except ValueError as e: errors.append(error({ 'message': 'error in filename datetime string: %s'", "df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors def validate_sheet_vars(wb, spec=spec): errors =", "in custom vars? vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist() for var, na in zip(vars_defined, vars_missing_value): if", "= pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN # Flag NaN as", "not required: # Empty cell is a valid value. Remove empty cells before", "is a valid value. 
Remove empty cells before further checks series = series[series.str.len()", "columns required_columns = list(spec['columns']['data'].keys()) df_data = df.drop(required_columns, axis='columns') # Collect variable short names", "are now NaN # Flag NaN as errors nonnumeric_errors = series[pd.isna(converted)] for idx,", "sys # Load dataset file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)", "not None: max_errors = series[converted > colspec['max']] for idx, val in max_errors.iteritems(): errors.append(error({", "\"%s\" worksheet' % ('vars_meta_data', 'data'), 'value': ', '.join(extra_defined) })) if extra_found: errors.append(error({ 'message':", "True, 'na': na } empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data') errors.extend(empty_errors) return errors", "errors.extend(validate_sheet_generic(df, 'data', spec)) # Next check columns in 'data' that were defined in", "'invalid value', 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) # Check", "errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb, spec)) errors.extend(validate_sheet_data(wb, spec)) return errors def validate_column_datetimes(series, colspec, sheet): errors", "validate_column_strings(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors)", "dtype=unicode) errors = [] errors.extend(validate_filename(input_path, spec)) errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb, spec)) errors.extend(validate_sheet_data(wb, spec)) return", "spec['file_date'], 'value': m.group('date') })) if not re.match(r'^v.+$', m.group('version')): errors.append(error({ 'message': 'version string in", "series.name, 'sheet': sheet })) return errors def validate_column_generic(series, colspec, sheet): errors = []", "not 'vars_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'vars_meta_data', 'sheet':", "'column': series.name, 'sheet': sheet })) # Now remove empty cells series = series[series.str.len()", "filename_re.match(fn) if not m: errors.append(error({ 'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value':", "!= required_columns: errors.append(error({ 'message': 'the first %d columns of the \"%s\" worksheet should", "error from io import open import arrow import os import oyaml as yaml", "import re import sys # Load dataset file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path", "df = wb['data'] errors.extend(validate_sheet_generic(df, 'data', spec)) # Next check columns in 'data' that", "columns in 'data' that were defined in 'vars_meta_data' # First make sure that", "= [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now look for", "were defined in the \"%s\" worksheet but were not found in the \"%s\"", "cells before further checks series = series[series.str.len() > 0] elif str(na) == '':", "strings are now NaN # Flag NaN as errors nonnumeric_errors = series[pd.isna(converted)] for", "[] required = colspec.get('required', None) na = colspec.get('na', None) if not required: #", "short names from vars_meta_data sheet and check that # data columns in 'data'", "should be %s' % (len(required_columns), sheet, required_columns), 'value': str(df.columns.tolist()), 'sheet': sheet })) return", "var not in 
extra_defined: sheet = 'vars_meta_data' colspec = { 'required': True, 'na':", "min_errors = series[converted < colspec['min']] for idx, val in min_errors.iteritems(): errors.append(error({ 'message': 'value", "start with \"v\"', 'value': fn })) return errors def validate_sheet_data(wb, spec): errors =", "'value': m.group('date') })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in filename must", "series = series[series.str.len() > 0] else: # NA is None or is not", "> 0] if na is not None: # Remove NA values before further", "% 'data', 'sheet': 'data' })) return errors df = wb['data'] errors.extend(validate_sheet_generic(df, 'data', spec))", "return errors df = wb['data'] errors.extend(validate_sheet_generic(df, 'data', spec)) # Next check columns in", "check custom data columns required_columns = list(spec['columns']['data'].keys()) df_data = df.drop(required_columns, axis='columns') # Collect", "'message': 'string length > %d' % colspec['max'], 'value': val, 'row': idx, 'column': series.name,", "})) if extra_found: errors.append(error({ 'message': 'some data variables were found in the \"%s\"", "0] elif str(na) == '': # Empty cell is a valid value. Remove", "colspec, sheet)) return errors def validate_sheet_metadata(wb, spec): errors = [] if not 'dataset_meta_data'", "import arrow import os import oyaml as yaml import pandas as pd import", "errors def validate_column_generic(series, colspec, sheet): errors = [] required = colspec.get('required', None) na", "of # proper missing values. # TODO: Is there any type-checking expected in", "string - should match %s' % colspec['format'], 'value': val, 'row': idx, 'column': series.name,", "if extra_defined: errors.append(error({ 'message': 'some data variables were defined in the \"%s\" worksheet", "max_errors = series[converted > colspec['max']] for idx, val in max_errors.iteritems(): errors.append(error({ 'message': 'value", "validate_column_generic(series, colspec, sheet): errors = [] required = colspec.get('required', None) na = colspec.get('na',", "string: %s' % e, 'value': m.group('date') })) except arrow.parser.ParserError as e: errors.append(error({ 'message':", "sheet) errors.extend(empty_errors) # Convert to floats converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings", "series[converted < colspec['min']] for idx, val in min_errors.iteritems(): errors.append(error({ 'message': 'value less than", "validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not None: maxlen_errors = series[series.str.len()", "if colspec.get('min', None) is not None: min_errors = series[converted < colspec['min']] for idx,", "specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path, encoding='utf-8') as fh:", "})) return errors def validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series =", "errors = [] required = colspec.get('required', None) na = colspec.get('na', None) if not", "val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors def validate_filename(input_path, spec):", "errors = [] if not 'vars_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is", "'sheet': sheet })) if colspec.get('max', None) is not None: max_errors = series[converted >", "% ('data', 'vars_meta_data'), 'value': ', '.join(extra_found) })) # Now validate the actual data", "idx, 'column': series.name, 'sheet': sheet })) return 
errors def validate_column_generic(series, colspec, sheet): errors", "def validate_column_generic(series, colspec, sheet): errors = [] required = colspec.get('required', None) na =", "in the \"%s\" worksheet' % ('vars_meta_data', 'data'), 'value': ', '.join(extra_defined) })) if extra_found:", "is not the empty string, therefore empty cells are not # valid values.", "# proper missing values. # TODO: Is there any type-checking expected in custom", "errors.append(error({ 'message': 'the first %d columns of the \"%s\" worksheet should be %s'", "= series[series.str.len() > 0] else: # NA is None or is not the", "columns defined in 'vars' sheet. vars_defined = wb['vars_meta_data']['var_short_name'].tolist() vars_found = df_data.columns.tolist() extra_defined =", "idx, 'column': series.name, 'sheet': sheet })) return errors def validate_column_floats(series, colspec, sheet): errors", "})) return errors def validate_column_generic(series, colspec, sheet): errors = [] required = colspec.get('required',", "'message': 'date in filename must be in %s format' % spec['file_date'], 'value': m.group('date')", "check that # data columns in 'data' sheet match data columns defined in", "errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors # Register column validators in lookup validator_lookup =", "'column': series.name, 'sheet': sheet })) return errors def validate_column_floats(series, colspec, sheet): errors =", "should match %s' % colspec['format'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet", "further checks series = series[series.str.len() > 0] elif str(na) == '': # Empty", "column validators in lookup validator_lookup = { 'float': validate_column_floats, 'string': validate_column_strings, 'datetime': validate_column_datetimes,", "Flag as errors. empty_errors = series[series.str.len() == 0] for idx, val in empty_errors.iteritems():", "= re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m = filename_re.match(fn) if not m: errors.append(error({ 'message': 'filename does not", "max_errors.iteritems(): errors.append(error({ 'message': 'value greater than maximum of {}'.format(colspec['max']), 'value': val, 'row': idx,", "required_columns: errors.append(error({ 'message': 'the first %d columns of the \"%s\" worksheet should be", "'message': 'error in datetime string: %s' % e, 'value': val, 'row': idx, 'column':", "not m: errors.append(error({ 'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value': fn }))", "'message': 'some data variables were defined in the \"%s\" worksheet but were not", "# TODO: Is there any type-checking expected in custom vars? vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist()", "Collect variable short names from vars_meta_data sheet and check that # data columns", "as fh: spec = yaml.load(fh) def validate(input_path): if (sys.version_info > (3, 0)): wb", "datetime string - should match %s' % colspec['format'], 'value': val, 'row': idx, 'column':", "'sheet': sheet })) return errors # Validate cells for colname, colspec in spec['columns'][sheet].items():", "arrow.get(val, colspec['format']) except ValueError as e: errors.append(error({ 'message': 'error in datetime string: %s'", "the actual data only on the condition of # proper missing values. 
#", "doesn't have any errors, if it does # don't bother with any more", "'missing required field', 'row': idx, 'column': series.name, 'sheet': sheet })) # Now remove", "'vars_meta_data' colspec = { 'required': True, 'na': na } empty_errors, _ = validate_column_generic(df_data[var],", "errors = [] if not 'dataset_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is", "# Non-numeric strings are now NaN # Flag NaN as errors nonnumeric_errors =", "dataset file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path, encoding='utf-8')", "does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value': fn })) else: try: dt = arrow.get(m.group('date'),", "greater than maximum of {}'.format(colspec['max']), 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet", "0)): wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=str) else: wb = pd.read_excel(input_path, sheet_name=None,", "colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not None: maxlen_errors = series[series.str.len() >", "e: errors.append(error({ 'message': 'error in datetime string: %s' % e, 'value': val, 'row':", "non-empty rows present = series[series.str.len() > 0] for idx, val in present.iteritems(): try:", "errors in non-empty rows present = series[series.str.len() > 0] for idx, val in", "idx, 'column': series.name, 'sheet': sheet })) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'invalid", "idx, val in min_errors.iteritems(): errors.append(error({ 'message': 'value less than minimum of {}'.format(colspec['min']), 'value':", "'data' that were defined in 'vars_meta_data' # First make sure that 'vars_meta_data' doesn't", "dt = arrow.get(m.group('date'), spec['file_date']) except ValueError as e: errors.append(error({ 'message': 'error in filename", "in 'data' sheet match data columns defined in 'vars' sheet. vars_defined = wb['vars_meta_data']['var_short_name'].tolist()", "is not None: min_errors = series[converted < colspec['min']] for idx, val in min_errors.iteritems():", "zip(vars_defined, vars_missing_value): if var not in extra_defined: sheet = 'vars_meta_data' colspec = {", "lookup validator_lookup = { 'float': validate_column_floats, 'string': validate_column_strings, 'datetime': validate_column_datetimes, 'generic': validate_column_generic }", "'sheet': 'vars_meta_data' })) return errors df = wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors", "sheet match data columns defined in 'vars' sheet. 
vars_defined = wb['vars_meta_data']['var_short_name'].tolist() vars_found =", "pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN # Flag NaN as errors", "for format errors in non-empty rows present = series[series.str.len() > 0] for idx,", "for colname, colspec in spec['columns'][sheet].items(): v = validator_lookup[colspec['type']] errors.extend(v(df[colname], colspec, sheet)) return errors", "% 'dataset_meta_data', 'sheet': 'dataset_meta_data' })) return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec))", "= colspec.get('required', None) na = colspec.get('na', None) if not required: # Empty cell", "= validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now look for format errors in non-empty", "in min_errors.iteritems(): errors.append(error({ 'message': 'value less than minimum of {}'.format(colspec['min']), 'value': val, 'row':", "sheet })) if colspec.get('max', None) is not None: max_errors = series[converted > colspec['max']]", "format' % spec['file_date'], 'value': m.group('date') })) if not re.match(r'^v.+$', m.group('version')): errors.append(error({ 'message': 'version", "pandas as pd import re import sys # Load dataset file specifications spec_file_name", "filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m = filename_re.match(fn) if not m: errors.append(error({ 'message': 'filename does", "missing' % 'vars_meta_data', 'sheet': 'vars_meta_data' })) return errors df = wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data',", "errors # Register column validators in lookup validator_lookup = { 'float': validate_column_floats, 'string':", "as e: errors.append(error({ 'message': 'invalid datetime string - should match %s' % colspec['format'],", "errors.append(error({ 'message': 'date in filename must be in %s format' % spec['file_date'], 'value':", "# Load dataset file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with", "data variables were defined in the \"%s\" worksheet but were not found in", "filename must start with \"v\"', 'value': fn })) return errors def validate_sheet_data(wb, spec):", "is missing' % 'data', 'sheet': 'data' })) return errors df = wb['data'] errors.extend(validate_sheet_generic(df,", "'vars_meta_data' # First make sure that 'vars_meta_data' doesn't have any errors, if it", "= set(vars_defined).difference(set(vars_found)) extra_found = set(vars_found).difference(set(vars_defined)) if extra_defined: errors.append(error({ 'message': 'some data variables were", "validate_sheet_vars(wb, spec=spec): errors = [] if not 'vars_meta_data' in wb: errors.append(error({ 'message': '\"%s\"", "= None max_errors = None if colspec.get('min', None) is not None: min_errors =", "def validate_sheet_generic(df, sheet, spec): errors = [] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] !=", "})) # Now validate the actual data only on the condition of #", "return (errors, series) def validate_column_strings(series, colspec, sheet): errors = [] empty_errors, series =", "'': # Empty cell is a valid value. 
from __future__ import unicode_literals
from .error import error
from io import open
import arrow
import os
import oyaml as yaml
import pandas as pd
import re
import sys

# Load dataset file specifications
spec_file_name = 'dataset_file_def.yaml'
spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)
with open(spec_file_path, encoding='utf-8') as fh:
    spec = yaml.load(fh)


def validate(input_path):
    if (sys.version_info > (3, 0)):
        wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
                           keep_default_na=False, dtype=str)
    else:
        wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
                           keep_default_na=False, dtype=unicode)
    errors = []
    errors.extend(validate_filename(input_path, spec))
    errors.extend(validate_sheet_metadata(wb, spec))
    errors.extend(validate_sheet_vars(wb, spec))
    errors.extend(validate_sheet_data(wb, spec))
    return errors
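
# Usage sketch: run the full validation pipeline on one workbook and print
# whatever errors come back. The path below is hypothetical; any file named
# <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx would do.
def _example_validate():
    for err in validate('mydata_2020-07-01_v1.0.xlsx'):  # hypothetical file
        print(err)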
def validate_column_datetimes(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    # Now look for format errors in non-empty rows
    present = series[series.str.len() > 0]
    for idx, val in present.iteritems():
        try:
            dt = arrow.get(val, colspec['format'])
        except ValueError as e:
            errors.append(error({
                'message': 'error in datetime string: %s' % e,
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
        except arrow.parser.ParserError as e:
            errors.append(error({
                'message': 'invalid datetime string - should match %s' % colspec['format'],
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors


def validate_column_floats(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    # Convert to floats
    converted = pd.to_numeric(series, errors='coerce')  # Non-numeric strings are now NaN
    # Flag NaN as errors
    nonnumeric_errors = series[pd.isna(converted)]
    for idx, val in nonnumeric_errors.iteritems():
        errors.append(error({
            'message': 'invalid value',
            'value': val,
            'row': idx,
            'column': series.name,
            'sheet': sheet
        }))
    # Check range
    min_errors = None
    max_errors = None
    if colspec.get('min', None) is not None:
        min_errors = series[converted < colspec['min']]
        for idx, val in min_errors.iteritems():
            errors.append(error({
                'message': 'value less than minimum of {}'.format(colspec['min']),
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    if colspec.get('max', None) is not None:
        max_errors = series[converted > colspec['max']]
        for idx, val in max_errors.iteritems():
            errors.append(error({
                'message': 'value greater than maximum of {}'.format(colspec['max']),
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors
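
# Sketch of the coercion trick validate_column_floats relies on: pd.to_numeric
# with errors='coerce' maps every non-numeric string to NaN, so pd.isna() on
# the converted series points straight at the offending cells. The values
# below are made up for illustration.
def _example_float_coercion():
    s = pd.Series(['1.5', 'abc', '2'])
    converted = pd.to_numeric(s, errors='coerce')  # [1.5, NaN, 2.0]
    bad = s[pd.isna(converted)]                    # just 'abc', at index 1
    return bad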
def validate_column_generic(series, colspec, sheet):
    errors = []
    required = colspec.get('required', None)
    na = colspec.get('na', None)
    if not required:
        # Empty cell is a valid value. Remove empty cells before further checks
        series = series[series.str.len() > 0]
    elif str(na) == '':
        # Empty cell is a valid value. Remove empty cells before further checks
        series = series[series.str.len() > 0]
    else:
        # NA is None or is not the empty string, therefore empty cells are not
        # valid values. Flag as errors.
        empty_errors = series[series.str.len() == 0]
        for idx, val in empty_errors.iteritems():
            errors.append(error({
                'message': 'missing required field',
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
        # Now remove empty cells
        series = series[series.str.len() > 0]
        if na is not None:
            # Remove NA values before further checks
            series = series[series != na]
    return (errors, series)


def validate_column_strings(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    if colspec.get('max', None) is not None:
        maxlen_errors = series[series.str.len() > colspec['max']]
        for idx, val in maxlen_errors.iteritems():
            errors.append(error({
                'message': 'string length > %d' % colspec['max'],
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors
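
# Sketch of the three empty-cell regimes validate_column_generic implements:
# optional columns and columns whose declared NA marker is the empty string
# silently drop blanks, while required columns flag each blank as a
# 'missing required field' error. Column name and values are made up.
def _example_empty_cell_handling():
    s = pd.Series(['a', '', 'b'], name='example_col')
    optional_errs, _ = validate_column_generic(s, {'required': False}, 'data')
    required_errs, _ = validate_column_generic(s, {'required': True, 'na': 'NA'}, 'data')
    return optional_errs, required_errs  # ([], [one missing-field error])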
def validate_filename(input_path, spec):
    fn = os.path.basename(input_path)
    errors = []
    filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\.xlsx$')
    m = filename_re.match(fn)
    if not m:
        errors.append(error({
            'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx',
            'value': fn
        }))
    else:
        try:
            dt = arrow.get(m.group('date'), spec['file_date'])
        except ValueError as e:
            errors.append(error({
                'message': 'error in filename datetime string: %s' % e,
                'value': m.group('date')
            }))
        except arrow.parser.ParserError as e:
            errors.append(error({
                'message': 'date in filename must be in %s format' % spec['file_date'],
                'value': m.group('date')
            }))
        if not re.match(r'^v.+$', m.group('version')):
            errors.append(error({
                'message': 'version string in filename must start with "v"',
                'value': fn
            }))
    return errors
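
# Sketch of how the filename pattern splits into its three named groups:
# the version group cannot contain underscores, so the last underscore marks
# the version and the one before it marks the date. The example filename is
# hypothetical.
def _example_filename_parts():
    pattern = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\.xlsx$')
    m = pattern.match('mydata_2020-07-01_v1.0.xlsx')
    return m.group('shortname'), m.group('date'), m.group('version')
    # -> ('mydata', '2020-07-01', 'v1.0')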
def validate_sheet_data(wb, spec):
    errors = []
    if not 'data' in wb:
        errors.append(error({
            'message': '"%s" worksheet is missing' % 'data',
            'sheet': 'data'
        }))
        return errors
    df = wb['data']
    errors.extend(validate_sheet_generic(df, 'data', spec))
    # Next check columns in 'data' that were defined in 'vars_meta_data'
    # First make sure that 'vars_meta_data' doesn't have any errors, if it does
    # don't bother with any more checks here
    if len(validate_sheet_vars(wb, spec)) > 0:
        return errors
    # Now check custom data columns
    required_columns = list(spec['columns']['data'].keys())
    df_data = df.drop(required_columns, axis='columns')
    # Collect variable short names from vars_meta_data sheet and check that
    # data columns in 'data' sheet match data columns defined in 'vars' sheet.
    vars_defined = wb['vars_meta_data']['var_short_name'].tolist()
    vars_found = df_data.columns.tolist()
    extra_defined = set(vars_defined).difference(set(vars_found))
    extra_found = set(vars_found).difference(set(vars_defined))
    if extra_defined:
        errors.append(error({
            'message': 'some data variables were defined in the "%s" worksheet but were not found in the "%s" worksheet' % ('vars_meta_data', 'data'),
            'value': ', '.join(extra_defined)
        }))
    if extra_found:
        errors.append(error({
            'message': 'some data variables were found in the "%s" worksheet but were not defined in the "%s" worksheet' % ('data', 'vars_meta_data'),
            'value': ', '.join(extra_found)
        }))
    # Now validate the actual data only on the condition of
    # proper missing values.
    # TODO: Is there any type-checking expected in custom vars?
    vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist()
    for var, na in zip(vars_defined, vars_missing_value):
        if var not in extra_defined:
            sheet = 'vars_meta_data'
            colspec = {
                'required': True,
                'na': na
            }
            empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data')
            errors.extend(empty_errors)
    return errors
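
# Sketch of the cross-sheet consistency check above: the two set differences
# isolate variables declared but never used, and used but never declared.
# The variable names are made up.
def _example_cross_sheet_diff():
    vars_defined = ['temp', 'salinity', 'depth']   # from vars_meta_data
    vars_found = ['temp', 'salinity', 'oxygen']    # custom columns in data
    extra_defined = set(vars_defined).difference(set(vars_found))  # {'depth'}
    extra_found = set(vars_found).difference(set(vars_defined))    # {'oxygen'}
    return extra_defined, extra_found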
"'invalid datetime string - should match %s' % colspec['format'], 'value': val, 'row': idx,", "errors. empty_errors = series[series.str.len() == 0] for idx, val in empty_errors.iteritems(): errors.append(error({ 'message':", "file specifications spec_file_name = 'dataset_file_def.yaml' spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name) with open(spec_file_path, encoding='utf-8') as", "not 'data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'data', 'sheet':", "match data columns defined in 'vars' sheet. vars_defined = wb['vars_meta_data']['var_short_name'].tolist() vars_found = df_data.columns.tolist()", "errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'dataset_meta_data', 'sheet': 'dataset_meta_data' })) return errors", "Empty cell is a valid value. Remove empty cells before further checks series", "'vars_meta_data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'vars_meta_data', 'sheet': 'vars_meta_data'", "variables were defined in the \"%s\" worksheet but were not found in the", "[] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message': 'the first %d", "errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Convert to", "worksheet is missing' % 'data', 'sheet': 'data' })) return errors df = wb['data']", "'message': 'some data variables were found in the \"%s\" worksheet but were not", "colspec.get('min', None) is not None: min_errors = series[converted < colspec['min']] for idx, val", "None: min_errors = series[converted < colspec['min']] for idx, val in min_errors.iteritems(): errors.append(error({ 'message':", "converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings are now NaN # Flag NaN", "in the \"%s\" worksheet but were not found in the \"%s\" worksheet' %", "= yaml.load(fh) def validate(input_path): if (sys.version_info > (3, 0)): wb = pd.read_excel(input_path, sheet_name=None,", "% colspec['max'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors", "required: # Empty cell is a valid value. 
Remove empty cells before further", "errors nonnumeric_errors = series[pd.isna(converted)] for idx, val in nonnumeric_errors.iteritems(): errors.append(error({ 'message': 'invalid value',", "('vars_meta_data', 'data'), 'value': ', '.join(extra_defined) })) if extra_found: errors.append(error({ 'message': 'some data variables", "def validate(input_path): if (sys.version_info > (3, 0)): wb = pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False,", "import pandas as pd import re import sys # Load dataset file specifications", "errors.append(error({ 'message': 'error in filename datetime string: %s' % e, 'value': m.group('date') }))", "if var not in extra_defined: sheet = 'vars_meta_data' colspec = { 'required': True,", "})) except arrow.parser.ParserError as e: errors.append(error({ 'message': 'date in filename must be in", "filename must be in %s format' % spec['file_date'], 'value': m.group('date') })) if not", "= wb['vars_meta_data']['var_missing_value'].tolist() for var, na in zip(vars_defined, vars_missing_value): if var not in extra_defined:", "spec)) > 0: return errors # Now check custom data columns required_columns =", "arrow.get(m.group('date'), spec['file_date']) except ValueError as e: errors.append(error({ 'message': 'error in filename datetime string:", "spec): errors = [] required_columns = list(spec['columns'][sheet].keys()) if df.columns.tolist()[:len(required_columns)] != required_columns: errors.append(error({ 'message':", "for var, na in zip(vars_defined, vars_missing_value): if var not in extra_defined: sheet =", "sheet = 'vars_meta_data' colspec = { 'required': True, 'na': na } empty_errors, _", "df.drop(required_columns, axis='columns') # Collect variable short names from vars_meta_data sheet and check that", "series[series.str.len() > 0] else: # NA is None or is not the empty", "set(vars_defined).difference(set(vars_found)) extra_found = set(vars_found).difference(set(vars_defined)) if extra_defined: errors.append(error({ 'message': 'some data variables were defined", "sheet })) return errors def validate_column_generic(series, colspec, sheet): errors = [] required =", "with open(spec_file_path, encoding='utf-8') as fh: spec = yaml.load(fh) def validate(input_path): if (sys.version_info >", "the condition of # proper missing values. # TODO: Is there any type-checking", "str(df.columns.tolist()), 'sheet': sheet })) return errors # Validate cells for colname, colspec in", "return errors # Validate cells for colname, colspec in spec['columns'][sheet].items(): v = validator_lookup[colspec['type']]", "'vars_meta_data', spec)) return errors # Register column validators in lookup validator_lookup = {", "series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now look for format errors in", "e: errors.append(error({ 'message': 'invalid datetime string - should match %s' % colspec['format'], 'value':", "# Remove NA values before further checks series = series[series != na] return", "if extra_found: errors.append(error({ 'message': 'some data variables were found in the \"%s\" worksheet", "sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max',", "import error from io import open import arrow import os import oyaml as", "'some data variables were found in the \"%s\" worksheet but were not defined", "defined in 'vars' sheet. 
vars_defined = wb['vars_meta_data']['var_short_name'].tolist() vars_found = df_data.columns.tolist() extra_defined = set(vars_defined).difference(set(vars_found))", "> colspec['max']] for idx, val in max_errors.iteritems(): errors.append(error({ 'message': 'value greater than maximum", "})) return errors df = wb['data'] errors.extend(validate_sheet_generic(df, 'data', spec)) # Next check columns", "errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'vars_meta_data', 'sheet': 'vars_meta_data' })) return errors", "wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors # Register column validators in lookup validator_lookup", "in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'data', 'sheet': 'data' }))", "axis='columns') # Collect variable short names from vars_meta_data sheet and check that #", "values. # TODO: Is there any type-checking expected in custom vars? vars_missing_value =", "%s' % (len(required_columns), sheet, required_columns), 'value': str(df.columns.tolist()), 'sheet': sheet })) return errors #", "errors.extend(empty_errors) # Convert to floats converted = pd.to_numeric(series, errors='coerce') # Non-numeric strings are", "errors # Now check custom data columns required_columns = list(spec['columns']['data'].keys()) df_data = df.drop(required_columns,", "cells for colname, colspec in spec['columns'][sheet].items(): v = validator_lookup[colspec['type']] errors.extend(v(df[colname], colspec, sheet)) return", "if not 'data' in wb: errors.append(error({ 'message': '\"%s\" worksheet is missing' % 'data',", "errors.extend(empty_errors) if colspec.get('max', None) is not None: maxlen_errors = series[series.str.len() > colspec['max']] for", "datetime string: %s' % e, 'value': m.group('date') })) except arrow.parser.ParserError as e: errors.append(error({", "'sheet': sheet })) # Check range min_errors = None max_errors = None if", "colspec.get('max', None) is not None: max_errors = series[converted > colspec['max']] for idx, val", "'column': series.name, 'sheet': sheet })) # Check range min_errors = None max_errors =", "= series[series.str.len() > 0] if na is not None: # Remove NA values", "in nonnumeric_errors.iteritems(): errors.append(error({ 'message': 'invalid value', 'value': val, 'row': idx, 'column': series.name, 'sheet':", "# First make sure that 'vars_meta_data' doesn't have any errors, if it does", "str(na) == '': # Empty cell is a valid value. Remove empty cells", "None) if not required: # Empty cell is a valid value. Remove empty", "Remove empty cells before further checks series = series[series.str.len() > 0] elif str(na)", "empty string, therefore empty cells are not # valid values. 
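
# Sketch of the column-order rule validate_sheet_generic enforces: the first
# N columns of a sheet must equal the spec's required columns, in order, but
# extra columns may follow. The lists are made up.
def _example_column_prefix_check():
    required_columns = ['dataset_short_name', 'dataset_release_date']
    found = ['dataset_short_name', 'dataset_release_date', 'extra_col']
    return found[:len(required_columns)] == required_columns  # True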
Flag as errors.", "first %d columns of the \"%s\" worksheet should be %s' % (len(required_columns), sheet,", "in filename must be in %s format' % spec['file_date'], 'value': m.group('date') })) if", "format errors in non-empty rows present = series[series.str.len() > 0] for idx, val", "'dataset_meta_data' })) return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors def", "= None if colspec.get('min', None) is not None: min_errors = series[converted < colspec['min']]", "def validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet)", "io import open import arrow import os import oyaml as yaml import pandas", "= validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not None: maxlen_errors =", "sheet })) return errors def validate_filename(input_path, spec): fn = os.path.basename(input_path) errors = []", "sheet })) return errors def validate_column_floats(series, colspec, sheet): errors = [] empty_errors, series", "were defined in 'vars_meta_data' # First make sure that 'vars_meta_data' doesn't have any", "in the \"%s\" worksheet but were not defined in the \"%s\" worksheet' %", "condition of # proper missing values. # TODO: Is there any type-checking expected", "is not None: # Remove NA values before further checks series = series[series", "[] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) if colspec.get('max', None) is not", "in 'vars_meta_data' # First make sure that 'vars_meta_data' doesn't have any errors, if", "val, 'row': idx, 'column': series.name, 'sheet': sheet })) return errors def validate_column_floats(series, colspec,", "df = wb['vars_meta_data'] errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec)) return errors # Register column validators in", "def validate_sheet_data(wb, spec): errors = [] if not 'data' in wb: errors.append(error({ 'message':", "look for format errors in non-empty rows present = series[series.str.len() > 0] for", "spec)) # Next check columns in 'data' that were defined in 'vars_meta_data' #", "errors def validate_sheet_metadata(wb, spec): errors = [] if not 'dataset_meta_data' in wb: errors.append(error({", "> 0] for idx, val in present.iteritems(): try: dt = arrow.get(val, colspec['format']) except", "required = colspec.get('required', None) na = colspec.get('na', None) if not required: # Empty", "proper missing values. 
# TODO: Is there any type-checking expected in custom vars?", "the \"%s\" worksheet but were not found in the \"%s\" worksheet' % ('vars_meta_data',", "validators in lookup validator_lookup = { 'float': validate_column_floats, 'string': validate_column_strings, 'datetime': validate_column_datetimes, 'generic':", "for idx, val in maxlen_errors.iteritems(): errors.append(error({ 'message': 'string length > %d' % colspec['max'],", "'sheet': sheet })) return errors def validate_column_generic(series, colspec, sheet): errors = [] required", "is not None: max_errors = series[converted > colspec['max']] for idx, val in max_errors.iteritems():", "'data', spec)) # Next check columns in 'data' that were defined in 'vars_meta_data'", "colspec, 'data') errors.extend(empty_errors) return errors def validate_sheet_generic(df, sheet, spec): errors = [] required_columns", "})) return errors df = wb['dataset_meta_data'] errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec)) return errors def validate_sheet_vars(wb,", "None) is not None: maxlen_errors = series[series.str.len() > colspec['max']] for idx, val in", "validate_column_generic(df_data[var], colspec, 'data') errors.extend(empty_errors) return errors def validate_sheet_generic(df, sheet, spec): errors = []", "Check range min_errors = None max_errors = None if colspec.get('min', None) is not", "checks series = series[series != na] return (errors, series) def validate_column_strings(series, colspec, sheet):", "import oyaml as yaml import pandas as pd import re import sys #", "\"v\"', 'value': fn })) return errors def validate_sheet_data(wb, spec): errors = [] if", "series[series.str.len() > 0] for idx, val in present.iteritems(): try: dt = arrow.get(val, colspec['format'])", "errors.append(error({ 'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx', 'value': fn })) else: try:", "sheet): errors = [] empty_errors, series = validate_column_generic(series, colspec, sheet) errors.extend(empty_errors) # Now", "don't bother with any more checks here if len(validate_sheet_vars(wb, spec)) > 0: return", "'message': 'the first %d columns of the \"%s\" worksheet should be %s' %", "not None: min_errors = series[converted < colspec['min']] for idx, val in min_errors.iteritems(): errors.append(error({", "length > %d' % colspec['max'], 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet", "sheet })) # Now remove empty cells series = series[series.str.len() > 0] if", "nonnumeric_errors = series[pd.isna(converted)] for idx, val in nonnumeric_errors.iteritems(): errors.append(error({ 'message': 'invalid value', 'value':", "def validate_filename(input_path, spec): fn = os.path.basename(input_path) errors = [] filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$') m", "})) # Check range min_errors = None max_errors = None if colspec.get('min', None)", "nonnumeric_errors.iteritems(): errors.append(error({ 'message': 'invalid value', 'value': val, 'row': idx, 'column': series.name, 'sheet': sheet", "'data'), 'value': ', '.join(extra_defined) })) if extra_found: errors.append(error({ 'message': 'some data variables were", "pd.read_excel(input_path, sheet_name=None, na_values=[], keep_default_na=False, dtype=unicode) errors = [] errors.extend(validate_filename(input_path, spec)) errors.extend(validate_sheet_metadata(wb, spec)) errors.extend(validate_sheet_vars(wb,", "def validate_sheet_metadata(wb, spec): errors = [] if not 'dataset_meta_data' in wb: 
import arrow
import os
import yaml  # the original imported some YAML package "as yaml"; the exact package is not recoverable
import pandas as pd
import re
import sys

# Load dataset file specifications
spec_file_name = 'dataset_file_def.yaml'
spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)
with open(spec_file_path, encoding='utf-8') as fh:
    spec = yaml.load(fh)


def error(info):
    # Helper assumed by the validators below; its original definition is not
    # recoverable from this dump. It most likely just returns the error record.
    return info


def validate(input_path):
    if (sys.version_info > (3, 0)):
        wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
                           keep_default_na=False, dtype=str)
    else:
        wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
                           keep_default_na=False, dtype=unicode)
    errors = []
    errors.extend(validate_filename(input_path, spec))
    errors.extend(validate_sheet_metadata(wb, spec))
    errors.extend(validate_sheet_vars(wb, spec))
    errors.extend(validate_sheet_data(wb, spec))
    return errors


def validate_column_datetimes(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    # Now look for format errors in the non-empty rows
    for idx, val in series.iteritems():
        try:
            arrow.get(val, colspec['format'])
        except ValueError as e:
            errors.append(error({
                'message': 'error in datetime string: %s' % e,
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
        except arrow.parser.ParserError:
            errors.append(error({
                'message': 'invalid datetime string - should match %s' % colspec['format'],
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors


def validate_column_floats(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    # Convert to floats
    converted = pd.to_numeric(series, errors='coerce')
    # Flag cells that could not be parsed as numbers (this block is truncated
    # in the dump; the wording of the message is assumed)
    nonnum_errors = series[converted.isna()]
    for idx, val in nonnum_errors.iteritems():
        errors.append(error({
            'message': 'value is not numeric',
            'value': val,
            'row': idx,
            'column': series.name,
            'sheet': sheet
        }))
    # Check range
    min_errors = None
    if colspec.get('min', None) is not None:
        min_errors = series[converted < colspec['min']]
        for idx, val in min_errors.iteritems():
            errors.append(error({
                'message': 'value less than %d' % colspec['min'],  # mirrored from the max check
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    if colspec.get('max', None) is not None:
        max_errors = series[converted > colspec['max']]
        for idx, val in max_errors.iteritems():
            errors.append(error({
                'message': 'value greater than %d' % colspec['max'],
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors


def validate_column_generic(series, colspec, sheet):
    errors = []
    required = colspec.get('required', None)
    na = colspec.get('na', None)
    if not required:
        # Empty cell is a valid value. Remove empty cells before further checks
        series = series[series.str.len() > 0]
    elif str(na) == '':
        # Empty cell is a valid value. Remove empty cells before further checks
        series = series[series.str.len() > 0]
    else:
        # NA is None or is not the empty string, therefore empty cells are errors
        empty_errors = series[series.str.len() == 0]
        for idx, val in empty_errors.iteritems():
            errors.append(error({
                'message': 'missing required value',  # exact wording truncated in the dump
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
        # Now remove empty cells
        series = series[series.str.len() > 0]
    if na is not None:
        series = series[series != na]
    return (errors, series)


def validate_column_strings(series, colspec, sheet):
    errors = []
    empty_errors, series = validate_column_generic(series, colspec, sheet)
    errors.extend(empty_errors)
    if colspec.get('max', None) is not None:
        maxlen_errors = series[series.str.len() > colspec['max']]
        for idx, val in maxlen_errors.iteritems():
            errors.append(error({
                'message': 'string longer than %d' % colspec['max'],  # exact wording truncated in the dump
                'value': val,
                'row': idx,
                'column': series.name,
                'sheet': sheet
            }))
    return errors


def validate_filename(input_path, spec):
    fn = os.path.basename(input_path)
    errors = []
    filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\.xlsx$')
    m = filename_re.match(fn)
    if not m:
        errors.append(error({
            'message': 'filename does not match format <shortname>_<date>_<version>.xlsx',  # wording truncated in the dump
            'value': fn
        }))
        return errors
    try:
        arrow.get(m.group('date'))
    except ValueError as e:
        errors.append(error({
            'message': 'error in filename datetime string: %s' % e,
            'value': m.group('date')
        }))
    except arrow.parser.ParserError as e:
        errors.append(error({
            'message': 'date in filename could not be parsed: %s' % e,  # wording truncated in the dump
            'value': m.group('date')
        }))
    if not re.match(r'^v.+$', m.group('version')):
        errors.append(error({
            'message': 'version string in filename must start with "v"',
            'value': fn
        }))
    return errors


def validate_sheet_generic(df, sheet, spec):
    errors = []
    required_columns = list(spec['columns'][sheet].keys())
    if df.columns.tolist()[:len(required_columns)] != required_columns:
        errors.append(error({
            'message': 'the first %d columns of the "%s" worksheet should be %s' % (len(required_columns), sheet, required_columns),  # leading wording truncated in the dump
            'value': str(df.columns.tolist()),
            'sheet': sheet
        }))
        return errors
    # Validate cells
    for colname, colspec in spec['columns'][sheet].items():
        v = validator_lookup[colspec['type']]
        errors.extend(v(df[colname], colspec, sheet))
    return errors


def validate_sheet_metadata(wb, spec):
    errors = []
    if not 'dataset_meta_data' in wb:
        errors.append(error({
            'message': '"%s" worksheet is missing' % 'dataset_meta_data',
            'sheet': 'dataset_meta_data'
        }))
        return errors
    df = wb['dataset_meta_data']
    errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec))
    return errors


def validate_sheet_vars(wb, spec):
    errors = []
    if not 'vars_meta_data' in wb:
        errors.append(error({
            'message': '"%s" worksheet is missing' % 'vars_meta_data',
            'sheet': 'vars_meta_data'
        }))
        return errors
    df = wb['vars_meta_data']
    errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec))
    return errors


# Register column validators by spec column type (the exact keys are not
# recoverable from the dump; these names are assumed)
validator_lookup = {
    'datetime': validate_column_datetimes,
    'float': validate_column_floats,
    'string': validate_column_strings,
}


def validate_sheet_data(wb, spec):
    errors = []
    if not 'data' in wb:
        errors.append(error({
            'message': '"%s" worksheet is missing' % 'data',
            'sheet': 'data'
        }))
        return errors
    df = wb['data']
    errors.extend(validate_sheet_generic(df, 'data', spec))
    # Next check columns in 'data' that were defined in 'vars_meta_data'. First
    # make sure that 'vars_meta_data' doesn't have any errors, if it does
    # don't bother with any more checks here
    if len(validate_sheet_vars(wb, spec)) > 0:
        return errors
    # Now check custom data columns
    required_columns = list(spec['columns']['data'].keys())
    df_data = df.drop(required_columns, axis='columns')
    # Collect variable short names from vars_meta_data sheet and check that
    # data columns in 'data' sheet match data variables defined in 'vars_meta_data'
    vars_defined = wb['vars_meta_data']['var_short_name'].tolist()  # column name inferred from the comment above
    vars_found = df_data.columns.tolist()
    extra_defined = set(vars_defined).difference(set(vars_found))
    extra_found = set(vars_found).difference(set(vars_defined))
    if extra_defined:
        errors.append(error({
            'message': 'some data variables were defined in the "%s" worksheet but were not found in the "%s" worksheet' % ('vars_meta_data', 'data'),
            'value': ', '.join(extra_defined)
        }))
    if extra_found:
        errors.append(error({
            'message': 'some data variables were found in the "%s" worksheet but were not defined in the "%s" worksheet' % ('data', 'vars_meta_data'),
            'value': ', '.join(extra_found)
        }))
    # Now validate the actual data, only on the condition of proper missing
    # values. Is any type-checking expected in custom vars?
    vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist()
    for var, na in zip(vars_defined, vars_missing_value):  # pairing assumed; the dump truncates here
        colspec = {'required': True, 'na': na}
        empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data')
        errors.extend(empty_errors)
    return errors
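A minimal driver sketch for the validator above, assuming only what the reconstructed code defines (validate() returning a list of error dicts); the JSON printing and the exit-code convention are illustrative, not part of the original source.

import json

if __name__ == '__main__':
    problems = validate(sys.argv[1])  # argv[1]: path to a dataset .xlsx workbook
    for p in problems:
        print(json.dumps(p))
    sys.exit(1 if problems else 0)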
import torch.nn as nn
import torch.nn.functional as F
import torch

SCORE_THRESH = 0.3
STRIDE_SCALE = 8
IOU_THRESH = 0.6


class Integral(nn.Module):
    """A fixed layer for calculating integral result from distribution.

    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
    P(y_i) denotes the softmax vector that represents the discrete distribution
    y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}

    Args:
        reg_max (int): The maximal value of the discrete set. Default: 16. You
            may want to reset it according to your new dataset or related
            settings.
    """

    def __init__(self, reg_max=16):
        super(Integral, self).__init__()
        self.reg_max = reg_max
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max, self.reg_max + 1))

    def forward(self, x):
        """Forward feature from the regression head to get integral result of
        bounding box location.

        Args:
            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
                n is self.reg_max.

        Returns:
            x (Tensor): Integral result of box locations, i.e., distance
                offsets from the box center in four directions, shape (N, 4).
        """
        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
        return x


def IouCal(Box1, Box2):
    # Intersection rectangle of two [x1, y1, x2, y2] boxes
    inner_x1 = torch.max(Box1[0], Box2[0])
    inner_y1 = torch.max(Box1[1], Box2[1])
    inner_x2 = torch.min(Box1[2], Box2[2])
    inner_y2 = torch.min(Box1[3], Box2[3])
    area_inner = (inner_x2 - inner_x1) * (inner_y2 - inner_y1)
    # Union = area(Box1) + area(Box2) - intersection
    area = (Box2[2] - Box2[0]) * (Box2[3] - Box2[1]) + \
           (Box1[2] - Box1[0]) * (Box1[3] - Box1[1]) - \
           area_inner
    return torch.max(torch.tensor(0.), area_inner / area)


def nms(Bboxes):
    # Greedy NMS over [x1, y1, x2, y2, score] boxes, highest score first
    Bboxes = sorted(Bboxes, key=lambda x: x[4], reverse=True)
    record_dict = set()
    res = []
    for i in range(len(Bboxes)):
        if i not in record_dict:
            record_dict.add(i)
            res.append(Bboxes[i])
        else:
            continue
        for j in range(i + 1, len(Bboxes)):
            Iou = IouCal(Bboxes[i], Bboxes[j])
            if Iou > IOU_THRESH:
                record_dict.add(j)
                continue
    return res


def gfl_post_process(output, extra_info):
    integral = Integral(16)
    ml_scores, ml_bboxes = output
    scale_factor = extra_info["scale_factor"]
    levels = 5
    total_bboxes = []
    for level in range(levels):
        stride = 2**(level)*8  # default output order is small stride -> large stride
        feat_h, feat_w = ml_scores[level].shape[2:]
        scores = ml_scores[level].permute(0, 2, 3, 1).view(feat_h*feat_w, 1).sigmoid()
        bboxes = integral(ml_bboxes[level].permute(0, 2, 3, 1))*stride
        for i in range(len(scores)):
            if scores[i] > SCORE_THRESH:
                x = i % int(feat_w) * stride
                y = i // int(feat_w) * stride
                x1 = x - bboxes[i][0]
                y1 = y - bboxes[i][1]
                x2 = x + bboxes[i][2]
                y2 = y + bboxes[i][3]
                score_loc = scores[i]
                box = torch.stack([x1, y1, x2, y2], dim=0)/torch.tensor(scale_factor)
                total_bboxes.append(torch.cat([box, score_loc], dim=0))
    nmsBoxes = nms(total_bboxes)
    return nmsBoxes
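A small self-contained sketch (not from the original file) of what the Integral layer computes: the expectation of a softmax distribution over the discrete offsets {0, ..., reg_max}, taken independently for each of the four box sides.

if __name__ == '__main__':
    integral = Integral(reg_max=16)
    # one location, 4 sides, reg_max + 1 = 17 logits per side
    logits = torch.zeros(1, 4 * 17)
    logits[0, 0::17] = 10.0  # concentrate probability mass on bin 0 of every side
    offsets = integral(logits)
    print(offsets.shape, offsets)  # torch.Size([1, 4]), all values close to 0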
# samsmusa/My-manim-master
from manimlib import *
# from manimlib.imports import *
import numpy as np

text = Text("hello")
text2 = Text("how are you")
text3 = Text("who are you")
ltext = Tex("hello world")
ltext2 = Tex("hey man!")


class intro(Scene):
    def construct(self):
        text = TexText("hello", " OVI", " how", " are", " you?")
        for i in range(5):
            if i == 1:
                text[i].set_color(RED)
            else:
                text[i].set_color(GREEN)
            self.play(Write(text[i]))
            self.wait(1)
        kw = {"run_time": 3, "path_arc": PI / 2}
        text2 = TexText("Hi RAKIB!")
        text3 = TexText("hey ANOY bro!")
        self.play(TransformMatchingShapes(text, text2, **kw))
        self.wait()
        self.play(TransformMatchingShapes(text2, text3, **kw))
        self.wait()


class mohi(Scene):
    def construct(self):
        grid = NumberPlane((-10, 10), (-5, 5))
        source = Text("আমাএর What lies behind you and what lies in front of you", height=0.4)
        target = Text("pales in comparison to what lies inside of you", height=0.5)
        source1 = Text("What's your Openion, MOHI?", height=0.8)
        source.set_color(GREEN)
        source1.set_color(RED)
        self.play(ShowCreation(grid))
        self.play(Write(source))
        self.wait()
        kw = {"run_time": 3, "path_arc": PI}
        self.play(TransformMatchingShapes(source, target, **kw))
        self.wait()
        self.play(TransformMatchingShapes(target, source1, **kw))
        self.wait()


class intf(Scene):
    def construct(self):
        q = TexText("Beauty", " is", " the", " first", " test",
                    font="Arial", font_size=44, text_color=RED).set_color(RED)
        qoute2 = Text("there is no permanent place in the world for ugly mathematics")
        a = (q[1]).next_to(q[0], UR)
        # the definitions of b and c are lost in the source dump;
        # UL and DR are assumed by symmetry with a and d
        b = (q[2]).next_to(q[0], UL)
        c = (q[3]).next_to(q[0], DR)
        d = (q[4]).next_to(q[0], DL)
        e = VGroup(a, b, c, d)
        # qoute2.next_to(qoute, DOWN)
        self.play(FadeIn(e))
        self.play(Write(q), run_time=3)
        self.wait()
        # self.play(Write(qoute2), run_time=3)
        # self.wait()


class newc(Scene):
    def construct(self):
        text = Text("This is a regular text")
        self.play(Write(text))
        self.wait(3)


class typeOfText(Scene):
    def construct(self):
        tipes = TexText("""
            This is a regular text,
            $this is a formulas$,
            $$this is a formula$$
        """)
        self.play(Write(tipes))
        self.wait(3)


class deff(Scene):
    def construct(self):
        text = TexText("""
            This is a regular text,
            $\\displaystyle\\frac{x}{y}$,
            $$x^2+y^2=a^2$$
        """)
        self.play(Write(text))
        self.wait(3)


# position relative to screen
class tidp(Scene):
    def construct(self):
        # the first three substrings were lost in the source dump; placeholders used
        text = TexText("t0", " t1", " t2", " hey")
        text[0].to_edge(RIGHT)
        text[1].to_edge(DOWN)
        text[2].to_edge(LEFT)
        text[3].to_edge(UP)
        self.play(Write(text))
        self.wait(3)


class cp(Scene):
    def construct(self):
        text = Text("text")
        text2 = Text("central text")
        text.move_to(0.25 * UP)
        self.play(Write(text), Write(text2))
        self.wait(3)


class cp2(Scene):
    def construct(self):
        text = Text("hello")
        text2 = Text("how are you")
        text3 = Text("who are you")
        text2.move_to(3 * DOWN + 3 * LEFT)
        self.play(Write(text), Write(text2))
        self.wait()
        text3.move_to(1 * UP + 2 * RIGHT)
        self.play(Write(text3))
        self.wait()


# relative position
class cp3(Scene):
    def construct(self):
        self.play(Write(text))
        self.wait()
        text2.next_to(text, LEFT, buff=1)
        self.play(Write(text2))
        self.wait()
        text.shift(UP * 3)
        self.play(Write(text))
        self.wait()


# rotation
class ro(Scene):
    def construct(self):
        text.shift(UP)
        text.rotate(PI / 4)
        self.play(ShowCreation(text))
        self.wait()
        text.rotate(PI / 4)
        self.wait()
        text.flip(DOWN)
        self.wait()


# latex
class la(Scene):
    def construct(self):
        textHuge = Tex("{\\Huge Huge Text 012.\\#!?} Text")
        texthuge = Tex("{\\huge huge Text 012.\\#!?} Text")
        textLARGE = Tex("{\\LARGE LARGE Text 012.\\#!?} Text")
        textLarge = Tex("{\\Large Large Text 012.\\#!?} Text")
        textlarge = Tex("{\\large large Text 012.\\#!?} Text")
        textNormal = Tex("{\\normalsize normal Text 012.\\#!?} Text")
        textsmall = Tex("{\\small small Text 012.\\#!?} Texto normal")
        textfootnotesize = Tex("{\\footnotesize footnotesize Text 012.\\#!?} Text")
        textscriptsize = Tex("{\\scriptsize scriptsize Text 012.\\#!?} Text")
        texttiny = Tex("{\\tiny tiny Texto 012.\\#!?} Text normal")
        textHuge.to_edge(UP)
        texthuge.next_to(textHuge, DOWN, buff=0.1)
        textLARGE.next_to(texthuge, DOWN, buff=0.1)
        textLarge.next_to(textLARGE, DOWN, buff=0.1)
        textlarge.next_to(textLarge, DOWN, buff=0.1)
        textNormal.next_to(textlarge, DOWN, buff=0.1)
        textsmall.next_to(textNormal, DOWN, buff=0.1)
        textfootnotesize.next_to(textsmall, DOWN, buff=0.1)
        textscriptsize.next_to(textfootnotesize, DOWN, buff=0.1)
        texttiny.next_to(textscriptsize, DOWN, buff=0.1)
        self.add(textHuge, texthuge, textLARGE, textLarge, textlarge,
                 textNormal, textsmall, textfootnotesize, textscriptsize, texttiny)
        self.wait(3)


# transform
class tr(Scene):
    def construct(self):
        self.play(Write(text))
        self.wait()
        self.play(ReplacementTransform(text, text2))
        self.wait()


class trl(Scene):
    def construct(self):
        formula = Tex(
            "\\frac{d}{dx}",  # 0
            "(",              # 1
            "u",              # 2
            "+",              # 3
            "v",              # 4
            ")",              # 5
            "=",              # 6
            "\\frac{d}{dx}",  # 7
            "u",              # 8
            "+",              # 9
            "\\frac{d}{dx}",  # 10
            "v",              # 11
            font_size=70)
        # formula
        VGroup(formula[0::2]).set_color(RED)
        VGroup(formula[1::2]).set_color(BLUE)
        self.play(Write(formula[0:7]))
        self.wait()
        self.play(
            ReplacementTransform(formula[2].copy(), formula[8]),
            ReplacementTransform(formula[4].copy(), formula[11]),
            ReplacementTransform(formula[3].copy(), formula[9]),
            run_time=3
        )
        self.wait()
        self.play(
            ReplacementTransform(formula[0].copy(), formula[7]),
            ReplacementTransform(formula[0].copy(), formula[10]),
            run_time=3
        )
        self.wait()


class rtl2(Scene):
    def construct(self):
        formula = Tex(
            "\\frac{d}{dx}",  # 0
            "(",              # 1
            "u",              # 2
            "+",              # 3
            "v",              # 4
            ")",              # 5
            "=",              # 6
            "\\frac{d}{dx}",  # 7
            "u",              # 8
            "+",              # 9
            "\\frac{d}{dx}",  # 10
            "v",              # 11
            font_size=70)
        for letter, color in [("u", RED), ("v", BLUE)]:
            formula.set_color_by_tex(letter, color)
        self.play(Write(formula[0:7]))
        self.wait()
        self.play(
            ReplacementTransform(formula[2].copy(), formula[8]),
            ReplacementTransform(formula[4].copy(), formula[11]),
            ReplacementTransform(formula[3].copy(), formula[9])
        )
        self.wait()
        self.play(
            ReplacementTransform(formula[0].copy(), formula[7]),
            ReplacementTransform(formula[0].copy(), formula[10])
        )
        self.wait()


class rtl3(Scene):
    def construct(self):
        formula1 = Tex(
            "\\neg",     # 0
            "\\forall",  # 1
            "x",         # 2
            ":",         # 3
            "P(x)",      # 4
        )
        formula2 = Tex(
            "\\exists",  # 0
            "x",         # 1
            ":",         # 2
            "\\neg",     # 3
            "P(x)"       # 4
        )
        for size, pos, formula in [(2, 2 * UP, formula1), (2, 2 * DOWN, formula2)]:
            formula.scale(size)
            formula.move_to(pos)
        self.play(Write(formula1))
        self.wait()
        changes = [
            [(0, 1, 2, 3, 4), (3, 0, 1, 2, 4)],
        ]
        for pre_ind, post_ind in changes:
            self.play(*[
                ReplacementTransform(formula1[i].copy(), formula2[j])
                for i, j in zip(pre_ind, post_ind)
            ], run_time=2)
        self.wait()


class rtl4(Scene):
    def construct(self):
        formula1 = Tex(
            "\\neg",     # 0
            "\\forall",  # 1
            "x",         # 2
            ":",         # 3
            "P(x)",      # 4
        )
        formula2 = Tex(
            "\\exists",  # 0
            "x",         # 1
            ":",         # 2
            "\\neg",     # 3
            "P(x)"       # 4
        )
        parametters = [(2, 2 * UP, formula1, GREEN, "\\forall"),
                       (2, 2 * DOWN, formula2, ORANGE, "\\exists")]
        for size, pos, formula, col, sim in parametters:
            formula.scale(size)
            formula.move_to(pos)
            formula.set_color_by_tex(sim, col)
            formula.set_color_by_tex("\\neg", PINK)
        self.play(Write(formula1))
        self.wait()
        changes = [
            [(2, 3, 4), (1, 2, 4)],
            [(0,), (3,)],
            [(1, 0), (0,)],
        ]
        for pre_ind, post_ind in changes:
            self.play(*[
                ReplacementTransform(formula1[i].copy(), formula2[j])
                for i, j in zip(pre_ind, post_ind)
            ], run_time=2)
        self.wait()
class", "textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2))", "self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene):", "class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN)", "musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class cp(Scene): def construct(self):", "self.play(Write(tipes)) self.wait(3) class deff(Scene): def construct(self): text = TexText(\"\"\" This is a regular", "height=0.5) source1 = Text(\"What's your Openion, MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play(", "Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large Text", "\"+\", #3 \"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\",", "\" is\", \" the\",\" first\",\" test\", font =\"Arial\" , font_size = 44, text_color", "# self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene): def construct(self): text = Text(\"This", "self.wait() text.shift(UP*3) self.play(Write(text)) self.wait() #rotation class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait()", "self.wait(3) class deff(Scene): def construct(self): text = TexText(\"\"\" This is a regular text,", "you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class cp3(Scene): def construct(self):", "tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self): formula =", "#0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]:", "def construct(self): q = TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\", font =\"Arial\"", "manimlib.imports import * import numpy as np text = Text(\"hello\") text2 = Text(\"how", "formula$$ \"\"\") self.play(Write(tipes)) self.wait(3) class deff(Scene): def construct(self): text = TexText(\"\"\" This is", "DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait() class", "10), (-5, 5)) source = Text(\"আমাএর What lies behind you and what lies", "buff=1) self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text)) self.wait() #rotation class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4)", "in the world for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c", "class intf(Scene): def construct(self): q = TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\",", "], run_time =2 ) self.wait() class rtl4(Scene): def 
construct(self): formula1 = Tex( \"\\\\neg\",", "self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid", "Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1)", "\" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class cp(Scene): def", "newc(Scene): def construct(self): text = Text(\"This is a regular text\") self.play(Write(text)) self.wait(3) class", "manimlib import * # from manimlib.imports import * import numpy as np text", "deff(Scene): def construct(self): text = TexText(\"\"\" This is a regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$", "a regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This", ") self.play(Write(source)) self.wait() kw = {\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait()", "font =\"Arial\" , font_size = 44, text_color = RED ).set_color(RED) qoute2 = Text(\"there", "Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE =", "ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait()", "Tex( \"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\", #2 \"+\", #3 \"v\", #4 \")\", #5", "3 ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene): def construct(self):", "formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for pre_ind,", "#4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\", #10", "= Tex(\"hello world\") ltext2 = Tex(\"hey man!\") class intro(Scene): def construct(self): text =", "Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def construct(self): text = Text(\"hello\") text2", "self.wait() kw = {\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1,", "\":\", #2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim", "\"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")]", "MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source)) self.wait() kw =", "self.wait(3) class cp2(Scene): def construct(self): text = Text(\"hello\") text2 = Text(\"how are you\")", "\"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for letter, color in", "def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self): formula = Tex(", "#1 \"u\", #2 \"+\", #3 \"v\", #4 
\")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7", "regular text, $this is a formulas$, $$this is a formula$$ \"\"\") self.play(Write(tipes)) self.wait(3)", "TexText(\"Hi RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3,", "formulas$, $$this is a formula$$ \"\"\") self.play(Write(tipes)) self.wait(3) class deff(Scene): def construct(self): text", "TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class", "range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw = {\"run_time\":", "RED ).set_color(RED) qoute2 = Text(\"there is no permanent place in the world for", "self.play(Write(source)) self.wait() kw = {\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target,", "formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for pre_ind, post_ind in", "from manimlib.imports import * import numpy as np text = Text(\"hello\") text2 =", "\"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7]))", "ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\",", ", font_size = 44, text_color = RED ).set_color(RED) qoute2 = Text(\"there is no", "source1, **kw)) self.wait() class intf(Scene): def construct(self): q = TexText(\"Beauty\", \" is\", \"", "= TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in range(5): if i ==", "#rotation class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait()", "= Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize normal Text 012.\\\\#!?} Text\")", "source = Text(\"আমাএর What lies behind you and what lies in front of", "self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene): def construct(self): formula =", "= Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize = Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\")", "$$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3) #position relative to scereen class tidp(Scene): def construct(self): text", "textscriptsize = Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?}", "self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene):", "textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?}", "= Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" ) for", "= Text(\"আমাএর What lies behind you and what lies in front of you\",", "texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): 
self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class", "self.wait() class intf(Scene): def construct(self): q = TexText(\"Beauty\", \" is\", \" the\",\" first\",\"", "text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid =", "text2 = Text(\"how are you\") text3 = Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait()", "class rtl4(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2", "#latex class la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge", "TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in range(5): if i == 1:", "Text(\"This is a regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes =", "tipes = TexText(\"\"\" This is a regular text, $this is a formulas$, $$this", "text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class cp(Scene): def construct(self): text = Text(\"text\") text2", "Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE", "\"(\", #1 \"u\", #2 \"+\", #3 \"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\",", "place in the world for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL)", "q = TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\", font =\"Arial\" , font_size", "\"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters:", ") self.wait() class rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1", "#2 \"+\", #3 \"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8", "rtl2(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3 \"v\",#4 \")\",#5", "class deff(Scene): def construct(self): text = TexText(\"\"\" This is a regular text, $\\\\displaystyle\\\\frac{x}{y}$,", "for i,j in zip(pre_ind,post_ind) ], run_time =2 ) self.wait() class rtl4(Scene): def construct(self):", "self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid = NumberPlane((-10, 10),", "regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This is", "def construct(self): textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge", "text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text))", "Text(\"pales in comparison to what lies inside of you\", height=0.5) source1 = Text(\"What's", "tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN)", "012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large", "text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3) #position relative to scereen class tidp(Scene): def", "of you\", height=0.5) source1 = Text(\"What's your Openion, MOHI?\", height = 0.8) source.set_color(GREEN)", "textNormal.next_to(textlarge,DOWN,buff=0.1) 
textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): self.play(Write(text))", "#1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos)", "small Text 012.\\\\#!?} Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize", "012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize normal", "in parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)]", "for i in range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1)", "in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2 ) self.wait()", "= Tex(\"hey man!\") class intro(Scene): def construct(self): text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\"", "self.wait(3) #position relative to scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\",", "text2 = TexText(\"Hi RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait()", "012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large", "for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) )", "012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE LARGE", "text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?}", "you\", height=0.4) target = Text(\"pales in comparison to what lies inside of you\",", "#3 \"P(x)\", #4 ) formula2 = Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2", "(3,0,1,2,4)], ] for pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ],", "self.play(TransformMatchingShapes(target, source1, **kw)) self.wait() class intf(Scene): def construct(self): q = TexText(\"Beauty\", \" is\",", "self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\",", ") parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK)", "Text(\"who are you\") ltext = Tex(\"hello world\") ltext2 = Tex(\"hey man!\") class intro(Scene):", "= Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\")", "# from manimlib.imports import * import numpy 
as np text = Text(\"hello\") text2", "= Text(\"text\") text2 = Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def construct(self):", "first\",\" test\", font =\"Arial\" , font_size = 44, text_color = RED ).set_color(RED) qoute2", "= TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\", font =\"Arial\" , font_size =", "\")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for letter,", "= Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text", "your Openion, MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source)) self.wait()", "Text\") textNormal = Tex(\"{\\\\normalsize normal Text 012.\\\\#!?} Text\") textsmall = Tex(\"{\\\\small small Text", "Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize", "formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4", "\"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\", #2 \"+\", #3 \"v\", #4 \")\", #5 \"=\",", "textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?}", "TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene):", "normal Text 012.\\\\#!?} Text\") textsmall = Tex(\"{\\\\small small Text 012.\\\\#!?} Texto normal\") textfootnotesize", "source1 = Text(\"What's your Openion, MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid)", "text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class cp(Scene): def construct(self): text = Text(\"text\") text2 = Text(\"central", "\"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]),", "# self.wait() class newc(Scene): def construct(self): text = Text(\"This is a regular text\")", "= TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class", "3) self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene): def construct(self): text", "\"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes = [", "b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) #", "class cp3(Scene): def construct(self): self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1) self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text))", "self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",", "(2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[", "(-5, 5)) source = Text(\"আমাএর What lies behind you and what lies in", "for pre_ind,post_ind in changes: 
self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2", "text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene):", "NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What lies behind you and what", "self.play(Write(text3)) self.wait() #relative position class cp3(Scene): def construct(self): self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1)", "Text 012.\\\\#!?} Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize =", "textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait()", "= Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1)", "self.wait() class trl(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\",", "qoute2 = Text(\"there is no permanent place in the world for ugly mathematics\")", "intro(Scene): def construct(self): text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in", "Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" ) for size,pos,formula", "0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source)) self.wait() kw = {\"run_time\": 3, \"path_arc\":", "if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw = {\"run_time\": 3,", "TexText(\"\"\" This is a regular text, $this is a formulas$, $$this is a", "\" the\",\" first\",\" test\", font =\"Arial\" , font_size = 44, text_color = RED", "(q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN)", "letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait()", "text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This is a", "Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge", "= NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What lies behind you and", "# qoute2.next_to(qoute, DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait() # self.play(Write(qoute2),run_time = 3) #", "\"v\",#4 \")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for", "= Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\")", "in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes = [ [(0,1,2,3,4), (3,0,1,2,4)], ] for", "post_ind in 
changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2 )", "{\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1, **kw)) self.wait() class", "What lies behind you and what lies in front of you\", height=0.4) target", "textNormal = Tex(\"{\\\\normalsize normal Text 012.\\\\#!?} Text\") textsmall = Tex(\"{\\\\small small Text 012.\\\\#!?}", "for pre_ind, post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time", "lies in front of you\", height=0.4) target = Text(\"pales in comparison to what", "text3 = Text(\"who are you\") ltext = Tex(\"hello world\") ltext2 = Tex(\"hey man!\")", "= RED ).set_color(RED) qoute2 = Text(\"there is no permanent place in the world", "height=0.4) target = Text(\"pales in comparison to what lies inside of you\", height=0.5)", "def construct(self): text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in range(5):", "are\",\" you?\") for i in range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN)", "import * import numpy as np text = Text(\"hello\") text2 = Text(\"how are", ") for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9])", "tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1) textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1)", "font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]), run_time =", "= Text(\"pales in comparison to what lies inside of you\", height=0.5) source1 =", "= 3) self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene): def construct(self):", "#transform class tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self):", "012.\\\\#!?} Text\") textsmall = Tex(\"{\\\\small small Text 012.\\\\#!?} Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize", "self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This is a regular", "\"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play(", "class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This is a regular text, $this", "text2 = Text(\"how are you\") text3 = Text(\"who are you\") ltext = Tex(\"hello", "= Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4 )", "class intro(Scene): def construct(self): text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i", "text.rotate(PI/4) self.play(ShowCreation(text)) 
self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def", "construct(self): textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text", "= (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e =", "inside of you\", height=0.5) source1 = Text(\"What's your Openion, MOHI?\", height = 0.8)", "target = Text(\"pales in comparison to what lies inside of you\", height=0.5) source1", "in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]),", "self.play(Write(formula1)) self.wait() changes = [ [(0,1,2,3,4), (3,0,1,2,4)], ] for pre_ind,post_ind in changes: self.play(*[", "def construct(self): text = TexText(\"\"\" This is a regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\")", "Text\") textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize normal Text", "self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0", "self.wait() class rtl2(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3", "= (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute,", "= Text(\"there is no permanent place in the world for ugly mathematics\") a", "ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d =", "texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1)", "cp3(Scene): def construct(self): self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1) self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text)) self.wait()", "for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d", "#11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]),", ") for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes = [ [(0,1,2,3,4),", "else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw = {\"run_time\": 3, \"path_arc\": PI / 2} text2", "Text(\"text\") text2 = Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def construct(self): text", "1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw = {\"run_time\": 3, \"path_arc\": PI /", "construct(self): tipes = TexText(\"\"\" This is a regular text, $this is a formulas$,", "\"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for letter, color in 
[(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7]))", "\"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\",", "self.wait() changes = [ [(0,1,2,3,4), (3,0,1,2,4)], ] for pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j])", "normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1) textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3)", "Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters", "text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid = NumberPlane((-10, 10), (-5, 5))", "class trl(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\", #2", "text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge Huge", "[(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10])", "self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1) self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text)) self.wait() #rotation class ro(Scene):", "color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play(", "= Text(\"how are you\") text3 = Text(\"who are you\") ltext = Tex(\"hello world\")", "you\") text3 = Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative", "self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene): def construct(self): text = Text(\"This is", "= (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait()", "\"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4 ) formula2 = Tex( \"\\\\exists\",", "np text = Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who are", "changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for pre_ind, post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j])", "as np text = Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who", "\"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70) #", "ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0", "/ 2} text2 = TexText(\"Hi 
RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2,", "] for pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time", "3, \"path_arc\": PI / 2} text2 = TexText(\"Hi RAKIB!\") text3 = TexText(\"hey ANOY", "lies behind you and what lies in front of you\", height=0.4) target =", "#2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in", "self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene): def construct(self): formula = Tex(", "font_size = 44, text_color = RED ).set_color(RED) qoute2 = Text(\"there is no permanent", "is\", \" the\",\" first\",\" test\", font =\"Arial\" , font_size = 44, text_color =", "zip(pre_ind,post_ind) ], run_time =2 ) self.wait() class rtl4(Scene): def construct(self): formula1 = Tex(", "self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for pre_ind, post_ind in changes:", "is no permanent place in the world for ugly mathematics\") a = (q[1]).next_to(q[0],UR)", "construct(self): text = Text(\"text\") text2 = Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene):", "OVI\",\" how\",\" are\",\" you?\") for i in range(5): if i == 1: text[i].set_color(RED)", "self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait()", "= Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who are you\") ltext", "class tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene): def construct(self): formula", "\"P(x)\", #4 ) formula2 = Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2 \"\\\\neg\",", "#1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for", "class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT)", "in front of you\", height=0.4) target = Text(\"pales in comparison to what lies", "Text(\"What's your Openion, MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source))", "class cp2(Scene): def construct(self): text = Text(\"hello\") text2 = Text(\"how are you\") text3", "def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT)", "ReplacementTransform(formula[3].copy(),formula[9]), run_time = 3 ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class", "012.\\\\#!?} Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize = Tex(\"{\\\\scriptsize", "no permanent place in the world for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b", "#3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), 
(2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size)", "(q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time =", "ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\",", "70 ) for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]),", "changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2 ) self.wait() class", "self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def construct(self): text = Text(\"hello\") text2 = Text(\"how are", "#6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70)", "Openion, MOHI?\", height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source)) self.wait() kw", "\"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class cp(Scene):", "text = Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who are you\")", "3) # self.wait() class newc(Scene): def construct(self): text = Text(\"This is a regular", "front of you\", height=0.4) target = Text(\"pales in comparison to what lies inside", "construct(self): text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in range(5): if", ").set_color(RED) qoute2 = Text(\"there is no permanent place in the world for ugly", "construct(self): grid = NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What lies behind", "= Tex( \"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\", #2 \"+\", #3 \"v\", #4 \")\",", "lies inside of you\", height=0.5) source1 = Text(\"What's your Openion, MOHI?\", height =", "c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN) self.play(FadeIn(e))", "class newc(Scene): def construct(self): text = Text(\"This is a regular text\") self.play(Write(text)) self.wait(3)", "Text\") textscriptsize = Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto", "a formulas$, $$this is a formula$$ \"\"\") self.play(Write(tipes)) self.wait(3) class deff(Scene): def construct(self):", "formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for pre_ind, post_ind", "construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class", "self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self):", "#0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4 ) formula2 = Tex(", "= Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def construct(self): text = Text(\"hello\")", "self.wait() #rotation class ro(Scene): def construct(self): text.shift(UP) 
text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4)", "kw = {\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1, **kw))", "construct(self): q = TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\", font =\"Arial\" ,", "Text\") textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text", "to what lies inside of you\", height=0.5) source1 = Text(\"What's your Openion, MOHI?\",", "grid = NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What lies behind you", "formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ] for", "ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene): def", "construct(self): text = Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who are", "trl(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\", #1 \"u\", #2 \"+\",", "#3 \"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes =", "run_time=3 ) self.wait() class rtl2(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1", "text = TexText(\"hello\",\" OVI\",\" how\",\" are\",\" you?\") for i in range(5): if i", "self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait() class newc(Scene): def construct(self): text =", "pre_ind, post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2", "self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1, **kw)) self.wait() class intf(Scene): def construct(self): q", "qoute2.next_to(qoute, DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait() # self.play(Write(qoute2),run_time = 3) # self.wait()", "= Text(\"This is a regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes", "scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\")", "ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) ) self.wait() class rtl3(Scene): def construct(self):", "parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)], [(1,0),(0,)] ]", "[(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes", "def construct(self): grid = NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What lies", 
"text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class cp3(Scene): def construct(self): self.play(Write(text))", "= [ [(0,1,2,3,4), (3,0,1,2,4)], ] for pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j", "\"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3 \"v\",#4 \")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\",", "formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]), run_time = 3 )", "self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge", ") formula2 = Tex( \"\\\\exists\", #0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\"", "def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex", "Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4 ) formula2", "3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1, **kw)) self.wait() class intf(Scene):", "#1 \"x\", #2 \":\", #3 \"P(x)\", #4 ) formula2 = Tex( \"\\\\exists\", #0", "in range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw =", ") self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene): def construct(self): formula", "self.wait() class newc(Scene): def construct(self): text = Text(\"This is a regular text\") self.play(Write(text))", "$$this is a formula$$ \"\"\") self.play(Write(tipes)) self.wait(3) class deff(Scene): def construct(self): text =", "cp2(Scene): def construct(self): text = Text(\"hello\") text2 = Text(\"how are you\") text3 =", "you and what lies in front of you\", height=0.4) target = Text(\"pales in", "are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class cp3(Scene): def", "Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize = Tex(\"{\\\\scriptsize scriptsize", "[ [(0,1,2,3,4), (3,0,1,2,4)], ] for pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in", "textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize normal Text 012.\\\\#!?}", "height = 0.8) source.set_color(GREEN) source1.set_color(RED) self.play( ShowCreation(grid) ) self.play(Write(source)) self.wait() kw = {\"run_time\":", "#3 \"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9", "**kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid = NumberPlane((-10,", "#4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos) 
formula.set_color_by_tex(sim,col)", "size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes = [ [(0,1,2,3,4), (3,0,1,2,4)], ]", "* # from manimlib.imports import * import numpy as np text = Text(\"hello\")", "TexText(\"\"\" This is a regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3) #position relative", "\"\\\\neg\", #3 \"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait() changes", "rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\",", "def construct(self): text = Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who", "self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge Huge Text", "self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\" This is a regular text,", "self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): self.play(Write(text)) self.wait() self.play(ReplacementTransform(text,text2)) self.wait() class trl(Scene):", "VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]), run_time = 3 ) self.wait()", "Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1) textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1) textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny)", "Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE", "44, text_color = RED ).set_color(RED) qoute2 = Text(\"there is no permanent place in", "self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text)) self.wait() #rotation class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text))", "self.wait() text.rotate(PI/4) self.wait() text.rotate(PI/4) self.wait() text.flip(DOWN) self.wait() #latex class la(Scene): def construct(self): textHuge", "PI / 2} text2 = TexText(\"Hi RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text,", "what lies inside of you\", height=0.5) source1 = Text(\"What's your Openion, MOHI?\", height", "self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]), run_time = 3 ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]),", "text3 = Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position", "= Tex(\"{\\\\normalsize 
normal Text 012.\\\\#!?} Text\") textsmall = Tex(\"{\\\\small small Text 012.\\\\#!?} Texto", "\"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color)", "relative to scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\",", "in comparison to what lies inside of you\", height=0.5) source1 = Text(\"What's your", "\"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED)", "behind you and what lies in front of you\", height=0.4) target = Text(\"pales", "test\", font =\"Arial\" , font_size = 44, text_color = RED ).set_color(RED) qoute2 =", "formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]), ReplacementTransform(formula[4].copy(),formula[11]), ReplacementTransform(formula[3].copy(),formula[9]) ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]) )", "#8 \"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE)", "self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self): grid = NumberPlane((-10, 10), (-5,", "construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP)", "pre_ind,post_ind in changes: self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2 )", "texthuge = Tex(\"{\\\\huge huge Text 012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?}", "Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\")", "Text(\"there is no permanent place in the world for ugly mathematics\") a =", "self.play(*[ ReplacementTransform(formula1[i].copy(),formula2[j]) for i,j in zip(pre_ind,post_ind) ], run_time =2 ) self.wait() class rtl4(Scene):", "a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e", "\"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\", #4 ) formula2 =", "This is a regular text, $this is a formulas$, $$this is a formula$$", "Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large", "textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self): self.play(Write(text)) self.wait()", "= Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class", "la(Scene): def construct(self): textHuge = Tex(\"{\\\\Huge Huge Text 012.\\\\#!?} Text\") texthuge = Tex(\"{\\\\huge", "bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait() class mohi(Scene): def construct(self):", "= VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN) 
self.play(FadeIn(e)) self.play(Write(q),run_time = 3) self.wait() # self.play(Write(qoute2),run_time =", "i in range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw", "def construct(self): text = Text(\"text\") text2 = Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class", "self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class cp3(Scene): def construct(self): self.play(Write(text)) self.wait() text2.next_to(text,", "are you\") text3 = Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait()", "\"u\",#2 \"+\",#3 \"v\",#4 \")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70", "\"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 ) for letter, color in [(\"u\",RED),(\"v\",BLUE)]:", "footnotesize Text 012.\\\\#!?} Text\") textscriptsize = Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny =", "mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL)", "self.play(Write(text[i])) self.wait(1) kw = {\"run_time\": 3, \"path_arc\": PI / 2} text2 = TexText(\"Hi", "012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1) textLARGE.next_to(texthuge,DOWN,buff=0.1)", "in zip(pre_ind,post_ind) ], run_time =2 ) self.wait() class rtl4(Scene): def construct(self): formula1 =", "#relative position class cp3(Scene): def construct(self): self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1) self.play(Write(text2)) self.wait()", "textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1) textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def construct(self):", "#0 \"x\", #1 \":\", #2 \"\\\\neg\", #3 \"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"),", "comparison to what lies inside of you\", height=0.5) source1 = Text(\"What's your Openion,", "regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3) #position relative to scereen class tidp(Scene):", "class rtl2(Scene): def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3 \"v\",#4", "text = TexText(\"\"\" This is a regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3)", "#10 \"v\" #11 , font_size=70) # formula VGroup(formula[0::2]).set_color(RED) VGroup(formula[1::2]).set_color(BLUE) self.play(Write(formula[0:7])) self.wait() self.play( ReplacementTransform(formula[2].copy(),formula[8]),", "Text 012.\\\\#!?} Text\") textLARGE = Tex(\"{\\\\LARGE LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large", "= Text(\"hello\") text2 = Text(\"how are you\") text3 = Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT)", "= (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d) # qoute2.next_to(qoute, DOWN) self.play(FadeIn(e)) self.play(Write(q),run_time", 
"text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw)) self.wait()", "#position relative to scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \"", "import numpy as np text = Text(\"hello\") text2 = Text(\"how are you\") text3", "run_time = 3 ) self.wait() self.play( ReplacementTransform(formula[0].copy(),formula[7]), ReplacementTransform(formula[0].copy(),formula[10]), run_time=3 ) self.wait() class rtl2(Scene):", "Text 012.\\\\#!?} Text\") texttiny = Tex(\"{\\\\tiny tiny Texto 012.\\\\#!?} Text normal\") textHuge.to_edge(UP) texthuge.next_to(textHuge,DOWN,buff=0.1)", "=2 ) self.wait() class rtl4(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\",", "the world for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c =", "= TexText(\"\"\" This is a regular text, $this is a formulas$, $$this is", "= {\"run_time\": 3, \"path_arc\": PI / 2} text2 = TexText(\"Hi RAKIB!\") text3 =", "TexText(\"Beauty\", \" is\", \" the\",\" first\",\" test\", font =\"Arial\" , font_size = 44,", "= Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3 \"v\",#4 \")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9", "def construct(self): text = Text(\"This is a regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene):", "is a regular text\") self.play(Write(text)) self.wait(3) class typeOfText(Scene): def construct(self): tipes = TexText(\"\"\"", "Large Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?} Text\") textNormal =", ") self.wait() class rtl4(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1", "textsmall = Tex(\"{\\\\small small Text 012.\\\\#!?} Texto normal\") textfootnotesize = Tex(\"{\\\\footnotesize footnotesize Text", "\"P(x)\" #4 ) parametters = [(2,2*UP,formula1,GREEN,\"\\\\forall\"), (2,2*DOWN,formula2,ORANGE,\"\\\\exists\")] for size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos)", "textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\") textlarge = Tex(\"{\\\\large large Text 012.\\\\#!?}", "text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i])) self.wait(1) kw = {\"run_time\": 3, \"path_arc\": PI / 2}", "mohi(Scene): def construct(self): grid = NumberPlane((-10, 10), (-5, 5)) source = Text(\"আমাএর What", "= {\"run_time\": 3, \"path_arc\": PI} self.play(TransformMatchingShapes(source, target, **kw)) self.wait() self.play(TransformMatchingShapes(target, source1, **kw)) self.wait()", "Text(\"who are you\") text2.move_to(3*DOWN+3*LEFT) self.play(Write(text),Write(text2)) self.wait() text3.move_to(1*UP+2*RIGHT) self.play(Write(text3)) self.wait() #relative position class cp3(Scene):", "construct(self): self.play(Write(text)) self.wait() text2.next_to(text, LEFT, buff=1) self.play(Write(text2)) self.wait() text.shift(UP*3) self.play(Write(text)) self.wait() #rotation class", "Tex(\"{\\\\footnotesize footnotesize Text 012.\\\\#!?} Text\") textscriptsize = Tex(\"{\\\\scriptsize scriptsize Text 012.\\\\#!?} Text\") texttiny", "\"u\", #2 \"+\", #3 \"v\", #4 \")\", #5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\",", "(q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR) d = (q[4]).next_to(q[0],DL) e = VGroup(a,b,c,d)", "\" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3) class 
cp(Scene): def construct(self): text", "self.wait(1) kw = {\"run_time\": 3, \"path_arc\": PI / 2} text2 = TexText(\"Hi RAKIB!\")", "text = Text(\"text\") text2 = Text(\"central text\") text.move_to(0.25*UP) self.play(Write(text),Write(text2)) self.wait(3) class cp2(Scene): def", "construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2 \":\", #3 \"P(x)\",", "Text(\"how are you\") text3 = Text(\"who are you\") ltext = Tex(\"hello world\") ltext2", "RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw)) self.wait() self.play(TransformMatchingShapes(text2, text3, **kw))", "the\",\" first\",\" test\", font =\"Arial\" , font_size = 44, text_color = RED ).set_color(RED)", "self.play(Write(text)) self.wait(3) #position relative to scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\",", "size,pos,formula,col,sim in parametters: formula.scale(size) formula.move_to(pos) formula.set_color_by_tex(sim,col) formula.set_color_by_tex(\"\\\\neg\",PINK) self.play(Write(formula1)) self.wait() changes =[ [(2,3,4),(1,2,4)], [(0,),(3,)],", "LARGE Text 012.\\\\#!?} Text\") textLarge = Tex(\"{\\\\Large Large Text 012.\\\\#!?} Text\") textlarge =", "\"+\",#3 \"v\",#4 \")\",#5 \"=\",#6 \"\\\\frac{d}{dx}\",#7 \"u\",#8 \"+\",#9 \"\\\\frac{d}{dx}\",#10 \"v\", font_size = 70 )", "large Text 012.\\\\#!?} Text\") textNormal = Tex(\"{\\\\normalsize normal Text 012.\\\\#!?} Text\") textsmall =", "textLarge.next_to(textLARGE,DOWN,buff=0.1) textlarge.next_to(textLarge,DOWN,buff=0.1) textNormal.next_to(textlarge,DOWN,buff=0.1) textsmall.next_to(textNormal,DOWN,buff=0.1) textfootnotesize.next_to(textsmall,DOWN,buff=0.1) textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1) texttiny.next_to(textscriptsize,DOWN,buff=0.1) self.add(textHuge,texthuge,textLARGE,textLarge,textlarge,textNormal,textsmall,textfootnotesize,textscriptsize,texttiny) self.wait(3) #transform class tr(Scene): def", "#0 \"(\", #1 \"u\", #2 \"+\", #3 \"v\", #4 \")\", #5 \"=\", #6", "world for ugly mathematics\") a = (q[1]).next_to(q[0],UR) b = (q[2]).next_to(q[0],UL) c = (q[3]).next_to(q[0],DR)", "is a regular text, $this is a formulas$, $$this is a formula$$ \"\"\")", "self.play(Write(text)) self.wait() #rotation class ro(Scene): def construct(self): text.shift(UP) text.rotate(PI/4) self.play(ShowCreation(text)) self.wait() text.rotate(PI/4) self.wait()", "**kw)) self.wait() class intf(Scene): def construct(self): q = TexText(\"Beauty\", \" is\", \" the\",\"", "you?\") for i in range(5): if i == 1: text[i].set_color(RED) else: text[i].set_color(GREEN) self.play(Write(text[i]))", "= TexText(\"Hello\", \"i'm\", \" musa\", \" hey\") text[0].to_edge(RIGHT) text[1].to_edge(DOWN) text[2].to_edge(LEFT) text[3].to_edge(UP) self.play(Write(text)) self.wait(3)", "class rtl3(Scene): def construct(self): formula1 = Tex( \"\\\\neg\", #0 \"\\\\forall\", #1 \"x\", #2", "#2 \"\\\\neg\", #3 \"P(x)\" ) for size,pos,formula in [(2,2*UP,formula1),(2,2*DOWN,formula2)]: formula.scale(size) formula.move_to(pos) self.play(Write(formula1)) self.wait()", "to scereen class tidp(Scene): def construct(self): text = TexText(\"Hello\", \"i'm\", \" musa\", \"", "\"v\", font_size = 70 ) for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait()", "font_size = 70 ) for letter, color in [(\"u\",RED),(\"v\",BLUE)]: formula.set_color_by_tex(letter,color) self.play(Write(formula[0:7])) self.wait() self.play(", "* import numpy as np text = 
Text(\"hello\") text2 = Text(\"how are you\")", "is a regular text, $\\\\displaystyle\\\\frac{x}{y}$, $$x^2+y^2=a^2$$ \"\"\") self.play(Write(text)) self.wait(3) #position relative to scereen", "2} text2 = TexText(\"Hi RAKIB!\") text3 = TexText(\"hey ANOY bro!\") self.play(TransformMatchingShapes(text, text2, **kw))", "#5 \"=\", #6 \"\\\\frac{d}{dx}\", #7 \"u\", #8 \"+\", #9 \"\\\\frac{d}{dx}\", #10 \"v\" #11", "def construct(self): formula = Tex( \"\\\\frac{d}{dx}\", #0 \"(\",#1 \"u\",#2 \"+\",#3 \"v\",#4 \")\",#5 \"=\",#6" ]
[ "{{if eq (len .Args) 1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else", "(eq $a \"gifss\" ) ( reFind $c $value ) ) -}} {{$s =", "0 }} {{if eq (len .Args) 1}} {{$r := joinStr \" \" $b.StringSlice}}", "\" names = [] for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n", "'/stickersraw') st = \"{{$a := index .CmdArgs 0 }} \\n\" st += \"{{$b", "\" $b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}} {{$c := index .CmdArgs", ":= str $r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{-", "{{range $b}} {{- if eq . $a -}} {{- $link := joinStr \"\"", "names = sorted(names) n = \"\" for name in names: n += \"\\\"\"", "{{$r := joinStr \" \" $s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse", "-}} {{- end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r := str $r}}", "joinStr \" \" $s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse 30 }}", "\" st += n st += \"\"\" }} {{if or (eq $a \"stickers\")", "{{$r := str $r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range $b}}", "st = \"{{$a := index .CmdArgs 0 }} \\n\" st += \"{{$b :=", "{{range $index,$value := $b}} {{- if or (hasPrefix $value $c) ( and (eq", "+= \"\"\" }} {{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a", ". $a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{-", "\"gifss\") }} {{deleteTrigger 0 }} {{if eq (len .Args) 1}} {{$r := joinStr", "\" }} {{range $index,$value := $b}} {{- if or (hasPrefix $value $c) (", "+= \"\\\"\" + name + \"\\\" \" st += n st += \"\"\"", "(len .Args) 2}} {{$c := index .CmdArgs 1}} {{$s := cslice \" \"", "(hasPrefix $value $c) ( and (eq $a \"gifss\" ) ( reFind $c $value", "st += \"{{$b := cslice \" names = [] for fileName in files:", "$b}} {{- if eq . $a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\"", "$a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger", "$link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{- end", ".Args) 1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq (len", ".CmdArgs 0 }} \\n\" st += \"{{$b := cslice \" names = []", "$a \".gif?raw=true\" -}} {{- $link -}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0]", "{{- end -}} {{- end}}\"\"\" with open(sys.path[0] + \"/output.yag\", \"w\") as text_file: text_file.write(st)", "(eq $a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }}", "}} {{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq", "{{ deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{- if eq . $a", "}} {{range $index,$value := $b}} {{- if or (hasPrefix $value $c) ( and", "(eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }}", "{{deleteTrigger 0 }} {{if eq (len .Args) 1}} {{$r := joinStr \" \"", "{{- end -}} {{- end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r :=", "= sorted(names) n = \"\" for name in names: n += \"\\\"\" +", "name + \"\\\" \" st += n st += \"\"\" }} {{if or", "{{- if eq . 
$a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a", "names = [] for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n =", "\"\" for name in names: n += \"\\\"\" + name + \"\\\" \"", "{{$s = $s.Append $value}} {{- end -}} {{- end}} {{$r := joinStr \"", "[] for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for", "\"\\\" \" st += n st += \"\"\" }} {{if or (eq $a", "index .CmdArgs 1}} {{$s := cslice \" \" }} {{range $index,$value := $b}}", "+ \"\\\" \" st += n st += \"\"\" }} {{if or (eq", "\"\"\" }} {{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\")", "{{- $link -}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0] + \"/output.yag\", \"w\")", "}} {{deleteTrigger 0 }} {{if eq (len .Args) 1}} {{$r := joinStr \"", "{{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{- if eq .", "names: n += \"\\\"\" + name + \"\\\" \" st += n st", ":= index .CmdArgs 0 }} \\n\" st += \"{{$b := cslice \" names", "\"gifss\" ) ( reFind $c $value ) ) -}} {{$s = $s.Append $value}}", ") -}} {{$s = $s.Append $value}} {{- end -}} {{- end}} {{$r :=", "-}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0] + \"/output.yag\", \"w\") as text_file:", "$index,$value := $b}} {{- if or (hasPrefix $value $c) ( and (eq $a", "{{$s := cslice \" \" }} {{range $index,$value := $b}} {{- if or", ") ( reFind $c $value ) ) -}} {{$s = $s.Append $value}} {{-", "-}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}}", ":= $b}} {{- if or (hasPrefix $value $c) ( and (eq $a \"gifss\"", "for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for name", "1}} {{$s := cslice \" \" }} {{range $index,$value := $b}} {{- if", "$b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}} {{$c := index .CmdArgs 1}}", "( and (eq $a \"gifss\" ) ( reFind $c $value ) ) -}}", "( reFind $c $value ) ) -}} {{$s = $s.Append $value}} {{- end", "name in names: n += \"\\\"\" + name + \"\\\" \" st +=", "= \"\" for name in names: n += \"\\\"\" + name + \"\\\"", "\" $s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}}", "+= n st += \"\"\" }} {{if or (eq $a \"stickers\") (eq $a", "$value $c) ( and (eq $a \"gifss\" ) ( reFind $c $value )", "files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index .CmdArgs 0 }}", "$a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if", "$c $value ) ) -}} {{$s = $s.Append $value}} {{- end -}} {{-", ":= joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}} {{$c", ":= cslice \" \" }} {{range $index,$value := $b}} {{- if or (hasPrefix", ":= index .CmdArgs 1}} {{$s := cslice \" \" }} {{range $index,$value :=", "{{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{-", "n = \"\" for name in names: n += \"\\\"\" + name +", "$a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link", "= [] files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index .CmdArgs", "if eq (len .Args) 2}} {{$c := index .CmdArgs 1}} {{$s := cslice", "-}} {{$s = $s.Append $value}} {{- end -}} {{- end}} {{$r := joinStr", "= [] for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\"", "}} \\n\" st += \"{{$b := cslice \" names = [] for 
fileName", "fileName in files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for name in", "n st += \"\"\" }} {{if or (eq $a \"stickers\") (eq $a \"gifs\")", "str $r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{- if", ".CmdArgs 1}} {{$s := cslice \" \" }} {{range $index,$value := $b}} {{-", "eq (len .Args) 1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else if", "st += n st += \"\"\" }} {{if or (eq $a \"stickers\") (eq", "= \"{{$a := index .CmdArgs 0 }} \\n\" st += \"{{$b := cslice", "30 }} {{end}} {{end}} {{range $b}} {{- if eq . $a -}} {{-", "cslice \" \" }} {{range $index,$value := $b}} {{- if or (hasPrefix $value", "+ '/stickersraw') st = \"{{$a := index .CmdArgs 0 }} \\n\" st +=", "\"{{$a := index .CmdArgs 0 }} \\n\" st += \"{{$b := cslice \"", "import sys,os arr = [] files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a", "+ name + \"\\\" \" st += n st += \"\"\" }} {{if", "\" \" $b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}} {{$c := index", "or (hasPrefix $value $c) ( and (eq $a \"gifss\" ) ( reFind $c", "\".gif?raw=true\" -}} {{- $link -}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0] +", "= os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index .CmdArgs 0 }} \\n\"", "{{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}}", "in files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for name in names:", "1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq (len .Args)", "st += \"\"\" }} {{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq", "eq (len .Args) 2}} {{$c := index .CmdArgs 1}} {{$s := cslice \"", "deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{- if eq . $a -}}", "2}} {{$c := index .CmdArgs 1}} {{$s := cslice \" \" }} {{range", "-}} {{- $link -}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0] + \"/output.yag\",", "\"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0", "\"gifs\") (eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if eq", "reFind $c $value ) ) -}} {{$s = $s.Append $value}} {{- end -}}", "and (eq $a \"gifss\" ) ( reFind $c $value ) ) -}} {{$s", "$a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if eq (len .Args)", "{{end}} {{range $b}} {{- if eq . $a -}} {{- $link := joinStr", "+= \"{{$b := cslice \" names = [] for fileName in files: names.append(fileName.split('.')[0])", "if eq . 
$a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\"", "end -}} {{- end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r := str", "names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for name in names: n +=", "}} {{if eq (len .Args) 1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}}", "{{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq $a", "\"\\\"\" + name + \"\\\" \" st += n st += \"\"\" }}", "$b}} {{- if or (hasPrefix $value $c) ( and (eq $a \"gifss\" )", ":= joinStr \" \" $s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse 30", "$c) ( and (eq $a \"gifss\" ) ( reFind $c $value ) )", "sys,os arr = [] files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a :=", "$a \"gifss\" ) ( reFind $c $value ) ) -}} {{$s = $s.Append", "{{- if or (hasPrefix $value $c) ( and (eq $a \"gifss\" ) (", ".Args) 2}} {{$c := index .CmdArgs 1}} {{$s := cslice \" \" }}", "$s.Append $value}} {{- end -}} {{- end}} {{$r := joinStr \" \" $s.StringSlice}}", "end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r := str $r}} {{$r}} {{", "{{else if eq (len .Args) 2}} {{$c := index .CmdArgs 1}} {{$s :=", "or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") (eq $a \"gifss\")", "\"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{- end -}} {{- end}}\"\"\"", "joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq (len .Args) 2}} {{$c :=", "$s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range", "[] files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index .CmdArgs 0", "{{$c := index .CmdArgs 1}} {{$s := cslice \" \" }} {{range $index,$value", "$r}} {{$r}} {{ deleteResponse 30 }} {{end}} {{end}} {{range $b}} {{- if eq", "$value ) ) -}} {{$s = $s.Append $value}} {{- end -}} {{- end}}", "$link -}} {{- end -}} {{- end}}\"\"\" with open(sys.path[0] + \"/output.yag\", \"w\") as", "sorted(names) n = \"\" for name in names: n += \"\\\"\" + name", "\" \" $s.StringSlice}} {{$r := str $r}} {{$r}} {{ deleteResponse 30 }} {{end}}", "}} {{end}} {{end}} {{range $b}} {{- if eq . $a -}} {{- $link", "n += \"\\\"\" + name + \"\\\" \" st += n st +=", "os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index .CmdArgs 0 }} \\n\" st", "\"{{$b := cslice \" names = [] for fileName in files: names.append(fileName.split('.')[0]) names", ") ) -}} {{$s = $s.Append $value}} {{- end -}} {{- end}} {{$r", "\"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{- end -}} {{- end}}\"\"\" with", "in names: n += \"\\\"\" + name + \"\\\" \" st += n", "arr = [] files = os.listdir(sys.path[0] + '/stickersraw') st = \"{{$a := index", ":= joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{- end -}}", "{{end}} {{end}} {{range $b}} {{- if eq . 
$a -}} {{- $link :=", "\"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if eq (len .Args) 1}}", "if or (hasPrefix $value $c) ( and (eq $a \"gifss\" ) ( reFind", "$a \"gifss\") }} {{deleteTrigger 0 }} {{if eq (len .Args) 1}} {{$r :=", "{{$r}} {{else if eq (len .Args) 2}} {{$c := index .CmdArgs 1}} {{$s", "$value}} {{- end -}} {{- end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r", "index .CmdArgs 0 }} \\n\" st += \"{{$b := cslice \" names =", "{{- end}} {{$r := joinStr \" \" $s.StringSlice}} {{$r := str $r}} {{$r}}", "cslice \" names = [] for fileName in files: names.append(fileName.split('.')[0]) names = sorted(names)", "(eq $a \"gif\") (eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if eq (len", "\" \" }} {{range $index,$value := $b}} {{- if or (hasPrefix $value $c)", "files: names.append(fileName.split('.')[0]) names = sorted(names) n = \"\" for name in names: n", "(eq $a \"gifss\") }} {{deleteTrigger 0 }} {{if eq (len .Args) 1}} {{$r", "\\n\" st += \"{{$b := cslice \" names = [] for fileName in", "= $s.Append $value}} {{- end -}} {{- end}} {{$r := joinStr \" \"", "joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}} {{- $link -}} {{- end -}} {{-", "(len .Args) 1}} {{$r := joinStr \" \" $b.StringSlice}} {{$r}} {{else if eq", ":= cslice \" names = [] for fileName in files: names.append(fileName.split('.')[0]) names =", "for name in names: n += \"\\\"\" + name + \"\\\" \" st", "0 }} \\n\" st += \"{{$b := cslice \" names = [] for", "eq . $a -}} {{- $link := joinStr \"\" \"https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/\" $a \".gif?raw=true\" -}}" ]
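# Sketch of the generated template (not part of the original script), assuming
# stickersraw/ contains cat.gif and dog.gif. output.yag then begins with
#
#   {{$a := index .CmdArgs 0 }}
#   {{$b := cslice "cat" "dog"  }}
#
# followed by the listing/filtering logic built above. Pasted into a YAGPDB
# custom command, a first argument of "stickers"/"gifs"/"gif"/"gifss" lists the
# sticker names (optionally prefix- or regex-filtered by a second argument),
# while an exact sticker name replies with its raw GitHub GIF link.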
[ "import matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The first argument is the", "Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__", "gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui:", "(7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\",", "#conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7),", "all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ ==", "blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image)", "else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0)", "import sys import matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The first argument", "= cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3", "first argument is the image image = cv2.imread(infile) #conver to grayscale gray_image =", "cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image)", "import cv2 import sys import matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The", "cv2 import sys import matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The first", "# The first argument is the image image = cv2.imread(infile) #conver to grayscale", "cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images", "cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ == \"__main__\": blur_display(sys.argv[1]) plt.savefig('output/Original_Image.png') plt.savefig('output/Gray_Image.png') plt.savefig('output/Blurred_Image.png')", "nogui=False): # The first argument is the image image = cv2.imread(infile) #conver to", "to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0)", "if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\",", "image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ == \"__main__\": blur_display(sys.argv[1]) plt.savefig('output/Original_Image.png') plt.savefig('output/Gray_Image.png')", "cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\",", "images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ == \"__main__\": blur_display(sys.argv[1])", "3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) 
cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ == \"__main__\":", "cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if __name__ == \"__main__\": blur_display(sys.argv[1]) plt.savefig('output/Original_Image.png')", "image image = cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it", "cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image,", "The first argument is the image image = cv2.imread(infile) #conver to grayscale gray_image", "as plt def blur_display(infile, nogui=False): # The first argument is the image image", "blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all", "blur_display(infile, nogui=False): # The first argument is the image image = cv2.imread(infile) #conver", "= cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image =", "nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image)", "def blur_display(infile, nogui=False): # The first argument is the image image = cv2.imread(infile)", "grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if", "argument is the image image = cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image,", "matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The first argument is the image", "#blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: #", "0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show all 3 images cv2.imwrite(\"Original_Image.png\", image)", "plt def blur_display(infile, nogui=False): # The first argument is the image image =", "is the image image = cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)", "= cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png',", "# Show all 3 images cv2.imwrite(\"Original_Image.png\", image) cv2.imwrite(\"Gray_Image.png\", gray_image) cv2.imwrite(\"Blurred_Image.png\", blurred_image) cv2.waitKey(0) if", "image = cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur it blurred_image", "it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else: # Show", "sys import matplotlib.pyplot as plt def blur_display(infile, nogui=False): # The first argument is", "the image image = cv2.imread(infile) #conver to grayscale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #blur", "cv2.COLOR_BGR2GRAY) #blur it blurred_image = cv2.GaussianBlur(image, (7,7), 0) if nogui: cv2.imwrite('test_blurred.png', blurred_image) else:" ]
[ "the License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y", "x + y x = t print(l) def fibonacci_correct_way(n): x, y, l =", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "= \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License,", "OF ANY KIND, either express or implied. See the License for the specific", "to in writing, software distributed under the License is distributed on an \"AS", "= t print(l) def fibonacci_correct_way(n): x, y, l = 0, 1, [] for", "an example of best practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "__copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0", "x = 0 y = 1 l = [] for i in range(n):", "2018 multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "not use this file except in compliance with the License. You may obtain", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "l = 0, 1, [] for i in range(n): l.append(x) x, y =", "governing permissions and limitations under the License. \"\"\" __license__ = \"Apache 2.0\" def", "except in compliance with the License. You may obtain a copy of the", "may not use this file except in compliance with the License. You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "[] for i in range(n): l.append(x) x, y = y, x + y", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "l = [] for i in range(n): l.append(x) t = y y =", "= \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y = 1 l =", "__author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "x, y, l = 0, 1, [] for i in range(n): l.append(x) x,", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "print(l) def fibonacci_correct_way(n): x, y, l = 0, 1, [] for i in", "\"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y = 1 l = []", "of best practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\"", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "the License for the specific language governing permissions and limitations under the License.", "2.0\" def fibonacci_wrong_way(n): x = 0 y = 1 l = [] for", "+ y x = t print(l) def fibonacci_correct_way(n): x, y, l = 0,", "\"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License, Version", "ANY KIND, either express or implied. 
See the License for the specific language", "l.append(x) t = y y = x + y x = t print(l)", "= y y = x + y x = t print(l) def fibonacci_correct_way(n):", "x + y print(l) # Entry point if __name__ == '__main__': fibonacci_wrong_way(100) fibonacci_correct_way(100)", "file except in compliance with the License. You may obtain a copy of", "in range(n): l.append(x) x, y = y, x + y print(l) # Entry", "\"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations under the License. \"\"\"", "fibonacci_wrong_way(n): x = 0 y = 1 l = [] for i in", "y = y, x + y print(l) # Entry point if __name__ ==", "1, [] for i in range(n): l.append(x) x, y = y, x +", "Unless required by applicable law or agreed to in writing, software distributed under", "language governing permissions and limitations under the License. \"\"\" __license__ = \"Apache 2.0\"", "range(n): l.append(x) t = y y = x + y x = t", "0 y = 1 l = [] for i in range(n): l.append(x) t", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "\"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y = 1", "2.0 (the \"License\"); you may not use this file except in compliance with", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= [] for i in range(n): l.append(x) t = y y = x", "0, 1, [] for i in range(n): l.append(x) x, y = y, x", "See the License for the specific language governing permissions and limitations under the", "y = 1 l = [] for i in range(n): l.append(x) t =", "[] for i in range(n): l.append(x) t = y y = x +", "y = x + y x = t print(l) def fibonacci_correct_way(n): x, y,", "y, x + y print(l) # Entry point if __name__ == '__main__': fibonacci_wrong_way(100)", "t print(l) def fibonacci_correct_way(n): x, y, l = 0, 1, [] for i", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "example of best practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ =", "the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "\"\"\"multiple_state_variables_bp.py: Giving an example of best practicing with multiple state variables.\"\"\" __author__ =", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "License, Version 2.0 (the \"License\"); you may not use this file except in", "compliance with the License. You may obtain a copy of the License at", "(the \"License\"); you may not use this file except in compliance with the", "for the specific language governing permissions and limitations under the License. \"\"\" __license__", "this file except in compliance with the License. You may obtain a copy", "for i in range(n): l.append(x) t = y y = x + y", "\"License\"); you may not use this file except in compliance with the License.", "express or implied. See the License for the specific language governing permissions and", "for i in range(n): l.append(x) x, y = y, x + y print(l)", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "= x + y x = t print(l) def fibonacci_correct_way(n): x, y, l", "you may not use this file except in compliance with the License. 
You", "agreed to in writing, software distributed under the License is distributed on an", "= 0 y = 1 l = [] for i in range(n): l.append(x)", "x, y = y, x + y print(l) # Entry point if __name__", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "x = t print(l) def fibonacci_correct_way(n): x, y, l = 0, 1, []", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "y x = t print(l) def fibonacci_correct_way(n): x, y, l = 0, 1,", "i in range(n): l.append(x) x, y = y, x + y print(l) #", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "applicable law or agreed to in writing, software distributed under the License is", "implied. See the License for the specific language governing permissions and limitations under", "Giving an example of best practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\"", "under the License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0 (the \"License\"); you", "= y, x + y print(l) # Entry point if __name__ == '__main__':", "fibonacci_correct_way(n): x, y, l = 0, 1, [] for i in range(n): l.append(x)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "i in range(n): l.append(x) t = y y = x + y x", "= 1 l = [] for i in range(n): l.append(x) t = y", "1 l = [] for i in range(n): l.append(x) t = y y", "law or agreed to in writing, software distributed under the License is distributed", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "def fibonacci_correct_way(n): x, y, l = 0, 1, [] for i in range(n):", "multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "<reponame>kallelzied/PythonTutoriel<filename>best_practice_examples/multiple_state_variables_bp.py \"\"\"multiple_state_variables_bp.py: Giving an example of best practicing with multiple state variables.\"\"\" __author__", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "__license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y = 1 l", "in compliance with the License. You may obtain a copy of the License", "and limitations under the License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x", "def fibonacci_wrong_way(n): x = 0 y = 1 l = [] for i", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "use this file except in compliance with the License. You may obtain a", "KIND, either express or implied. 
See the License for the specific language governing", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "t = y y = x + y x = t print(l) def", "= \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under the Apache License, Version 2.0 (the", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "in writing, software distributed under the License is distributed on an \"AS IS\"", "best practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "either express or implied. See the License for the specific language governing permissions", "multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed", "practicing with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018", "or agreed to in writing, software distributed under the License is distributed on", "in range(n): l.append(x) t = y y = x + y x =", "the specific language governing permissions and limitations under the License. \"\"\" __license__ =", "License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x = 0 y =", "range(n): l.append(x) x, y = y, x + y print(l) # Entry point", "state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py Licensed under", "with multiple state variables.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"\"\" Copyright 2018 multiple_state_variables_bp.py", "l.append(x) x, y = y, x + y print(l) # Entry point if", "permissions and limitations under the License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n):", "y y = x + y x = t print(l) def fibonacci_correct_way(n): x,", "y, l = 0, 1, [] for i in range(n): l.append(x) x, y", "= 0, 1, [] for i in range(n): l.append(x) x, y = y,", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "specific language governing permissions and limitations under the License. \"\"\" __license__ = \"Apache", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "limitations under the License. \"\"\" __license__ = \"Apache 2.0\" def fibonacci_wrong_way(n): x =", "required by applicable law or agreed to in writing, software distributed under the", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software" ]
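# A quick illustration (not part of the original file) of why the single tuple
# assignment in fibonacci_correct_way() needs no temporary variable: Python
# evaluates the whole right-hand side before rebinding any name.
def _tuple_assignment_demo():
    x, y = 0, 1
    x, y = y, x + y   # the RHS tuple (1, 1) is built first, then unpacked
    assert (x, y) == (1, 1)

_tuple_assignment_demo()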
[ "% __file__) # Parse CMD parser = argparse.ArgumentParser(prog=\"chromatindyn_wf\", description=\"Chromatin Dynamics workflow\") parser.add_argument(\"--config\", required=True,", "# print localhost import logging import re import pprint import multiprocessing #import psutil", "Run pipeline x_rnd = int(random.random()*10000000) outfiles = run_pipeline(args, num_cores, x_rnd) # Results prepare_results(args,", "extensions: files.extend(glob.glob(out_dir+\"/\"+extension)) tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] out_tar = tmp_dir +", "= args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_str.pdb\" pdbMeta[\"file_path\"] =", "tmp_dir + \"/chromdyn_str.dcd\" trajMeta[\"file_path\"] = traj_file # Set source_id & taxon_id trajMeta[\"source_id\"] =", "in enumerate(args.metadata)) args.metadata = metadata_by_id logger.debug(\"VRE metadata for input_files is:\\n %s \" %", "else: inputs_by_name[name]=d[\"value\"] args.config[\"input_files\"] = inputs_by_name logger.debug(\"Configuration file arguments and input_files are:\\n %s \"", "= \"PDB_chromatin_starting_structure\" # Set file_path. Absolute path. Should be better relative to root_dir?", "is not a writeable dir\".format(d)) else: return f @staticmethod def process_arguments(args): global out_dir", "proj_idx = next(idx for (idx, d) in enumerate(args.config[\"arguments\"]) if d[\"name\"] == \"project\") out_dir", "proc_stdout = process.communicate()[0].strip() print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/tra_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"],", "in a existing directory path, or not accessible\".format(d)) else: if os.access(d, os.W_OK): return", "= pdb_file # Set source_id & taxon_id pdbMeta1[\"source_id\"] = [] if \"sequence\" in", "arguments by name arguments_by_name = dict((d[\"name\"], d[\"value\"]) for (index, d) in enumerate(args.config[\"arguments\"])) args.config[\"arguments\"]", "('*.txt','*.csv','*.png') out_dirs = [] if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) if \"createTrajectory\"", "is not a readable dir\".format(d)) @staticmethod def writeable_file(f): if not os.path.isfile(f): d =", "proc_stdout = process.communicate()[0].strip() print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: print \"do Trajectory\" sequence_file_id", "= subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout bashCommand = \"cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash", "print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/tra_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand =", "logging.getLogger(\"lg\") # Indexing metadata files by file_id ([_id]) metadata_by_id = dict((d[\"_id\"], dict(d)) for", "(\"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]): ### PDB_chromatin_structure pdbMeta = {} # Set name # Should", "in args.config['input_files']: result[\"source_id\"].append(args.config['input_files'][\"sequence\"]) result[\"file_path\"] = out_tar result[\"taxon_id\"] = 0 json_data['output_files'].append(result) # Write down", "= process.communicate()[0].strip() print proc_stdout usr_dir = args.root_dir + \"/\" + 
import os
import sys
import argparse
import json
import time
import socket   # print localhost
import logging
import re
import pprint
import multiprocessing
#import psutil  # available memory
import subprocess
import shutil
import glob
import tarfile
import random

out_dir = ""

class Mugparams(object):

    @staticmethod
    def check_json(json_file):
        logger = logging.getLogger("lg")
        if not os.path.exists(json_file):
            raise argparse.ArgumentTypeError("%s does not exist" % json_file)
        with open(json_file, 'r') as file_data:
            try:
                data = json.load(file_data)
            except ValueError as e:
                logger.exception("%s is not a valid json file." % json_file)
                raise
        return data

    @staticmethod
    def readable_dir(d):
        if not os.path.isdir(d):
            raise Exception("readable_dir:{0} is not a directory path or is not accessible".format(d))
        if os.access(d, os.R_OK):
            return d
        else:
            raise Exception("readable_dir:{0} is not a readable dir".format(d))

    @staticmethod
    def writeable_file(f):
        if not os.path.isfile(f):
            d = os.path.dirname(f)
            # TODO Fails if relative path given
            if not os.path.isdir(d):
                raise Exception("writeable_file:{0} not in an existing directory path, or not accessible".format(d))
            else:
                if os.access(d, os.W_OK):
                    return f
                else:
                    raise Exception("writeable_file:{0} is not a writeable dir".format(d))
        else:
            return f

    @staticmethod
    def process_arguments(args):
        global out_dir
        logger = logging.getLogger("lg")
        # Setting working directory (project)
        proj_idx = next(idx for (idx, d) in enumerate(args.config["arguments"]) if d["name"] == "project")
        out_dir = args.root_dir + "/" + args.config["arguments"][proj_idx]["value"]
        logger.info("Output file directory set to %s" % out_dir)
        # Indexing config arguments by name
        arguments_by_name = dict((d["name"], d["value"]) for (index, d) in enumerate(args.config["arguments"]))
        args.config["arguments"] = arguments_by_name
        # Indexing config input_files by name (name may not be unique because of allow_multiple)
        inputs_by_name = {}
        for index, d in enumerate(args.config["input_files"]):
            name = args.config["input_files"][index]["name"]
            if name in inputs_by_name:
                # A repeated name is promoted to a list of values
                if isinstance(inputs_by_name[name], str):
                    inputs_by_name[name] = [inputs_by_name[name]]
                inputs_by_name[name].append(d["value"])
            else:
                inputs_by_name[name] = d["value"]
        args.config["input_files"] = inputs_by_name
        logger.debug("Configuration file arguments and input_files are:\n %s " % pprint.pformat(args.config))
        return 1

    @staticmethod
    def process_metadata(args):
        global out_dir
        logger = logging.getLogger("lg")
        # Indexing metadata files by file_id ([_id])
        metadata_by_id = dict((d["_id"], dict(d)) for (index, d) in enumerate(args.metadata))
        args.metadata = metadata_by_id
        logger.debug("VRE metadata for input_files is:\n %s " % pprint.pformat(args.metadata))
        return 1
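# The pipeline below repeats the same Popen/communicate pattern for every
# shell call. A minimal sketch of a shared wrapper (the name `run_cmd` is
# hypothetical, not part of the original workflow) that would also surface
# stderr and the exit code; run_pipeline() keeps the original inline calls:
def run_cmd(bashCommand):
    # Echo the command, run it through the shell, and print its stdout,
    # mirroring the inline pattern used throughout run_pipeline()
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, shell=True)
    out, err = process.communicate()
    print out.strip()
    if process.returncode != 0:
        logging.getLogger("lg").error("command failed (%s): %s" % (process.returncode, err.strip()))
    return process.returncode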
#
# Executing pipeline
# Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess
def run_pipeline(args, num_cores, x_rnd):
    sequence_file_id = args.config["input_files"]["sequence"]
    sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
    nucl_pos_file_id = args.config["input_files"]["nuclPos"]
    nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
    tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
    bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (nucl_pos, sequence, tmp_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout
    usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
    bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout
    if "createTrajectory" in args.config["arguments"]["operations"]:
        print "do Trajectory"
        sequence_file_id = args.config["input_files"]["sequence"]
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        iterations = args.config["arguments"]["createTrajectory:numStruct"]
        bashCommand = "cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s" % (nucl_pos, sequence, iterations, tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s" % (tmp_dir, tmp_dir, tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        print "do 3D from NucleaR"
        gff_file_id = args.config["input_files"]["gffNucleaR"]
        gff_file = args.root_dir + "/" + args.metadata[gff_file_id]["file_path"]
        gen_reg = args.config["arguments"]["create3DfromNucleaR:genRegion"]
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "mkdir %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        assembly = args.metadata[gff_file_id]["meta_data"]["assembly"]
        genome_file = "{0}/refGenomes/{1}/{1}.fa".format(args.public_dir, assembly)
        bashCommand = "/home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin 4" % (gff_file, genome_file, gen_reg, tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir + "/nucleR_to_3D_nucl_pos.txt")
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (tmp_dir + "/nucleR_to_3D_nucl_pos.txt", tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    return 1
#
# Prepare metadata for the output files
def prepare_results(args, x_rnd):
    global out_dir
    logger = logging.getLogger("lg")
    if (args.out_metadata):
        # Create out_metadata JSON
        json_data = {}
        json_data['output_files'] = []
        if ("createStructure" in args.config["arguments"]["operations"]) or ("create3DfromNucleaR" in args.config["arguments"]["operations"]):
            ### PDB_chromatin_structure
            pdbMeta = {}
            # Set name. Should coincide with tool.json
            pdbMeta["name"] = "PDB_chromatin_structure"
            # Set file_path. Absolute path. Should be better relative to root_dir?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_str.pdb"
            pdbMeta["file_path"] = pdb_file
            # Set source_id & taxon_id
            pdbMeta["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta["source_id"].append(args.config['input_files']["sequence"])
            # taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta["taxon_id"] = 0
            if pdbMeta["source_id"]:
                for file_id in pdbMeta["source_id"]:
                    pprint.pprint(args.metadata)
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta)
        if "createTrajectory" in args.config["arguments"]["operations"]:
            ### chromatin_starting_trajectory_structure
            pdbMeta1 = {}
            # Set name. Should coincide with tool.json
            pdbMeta1["name"] = "PDB_chromatin_starting_structure"
            # Set file_path. Absolute path. Should be better relative to root_dir?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_start_str.pdb"
            pdbMeta1["file_path"] = pdb_file
            # Set source_id & taxon_id
            pdbMeta1["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta1["source_id"].append(args.config['input_files']["sequence"])
            # taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta1["taxon_id"] = 0
            if pdbMeta1["source_id"]:
                for file_id in pdbMeta1["source_id"]:
                    pprint.pprint(args.metadata)
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta1["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta1)
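            # The dummy-structure and trajectory entries below follow the same
            # name/file_path/source_id/taxon_id pattern as the two PDB entries
            # above; only the output filename and the tool.json name differ.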
            ### chromatin_dummy_trajectory_structure
            pdbMeta2 = {}
            # Set name. Should coincide with tool.json
            pdbMeta2["name"] = "PDB_dummy_chromatin_structure"
            # Set file_path. Absolute path. Should be better relative to root_dir?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_dummy_str.pdb"
            pdbMeta2["file_path"] = pdb_file
            # Set source_id & taxon_id
            pdbMeta2["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta2["source_id"].append(args.config['input_files']["sequence"])
            # taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta2["taxon_id"] = 0
            if pdbMeta2["source_id"]:
                for file_id in pdbMeta2["source_id"]:
                    pprint.pprint(args.metadata)
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta2["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta2)
            ### chromatin_trajectory
            trajMeta = {}
            # Set name. Should coincide with tool.json
            trajMeta["name"] = "chromatin_trajectory"
            # Set file_path. Absolute path. Should be better relative to root_dir?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            traj_file = tmp_dir + "/chromdyn_str.dcd"
            trajMeta["file_path"] = traj_file
            # Set source_id & taxon_id
            trajMeta["source_id"] = []
            if "sequence" in args.config['input_files']:
                trajMeta["source_id"].append(args.config['input_files']["sequence"])
            # taxon_id is inherited from the input file (i.e. the source_id)
            trajMeta["taxon_id"] = 0
            if trajMeta["source_id"]:
                for file_id in trajMeta["source_id"]:
                    pprint.pprint(args.metadata)
                    if args.metadata[file_id]["taxon_id"]:
                        trajMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(trajMeta)
        # Prepare last output file: TAR of outputs, *CSVs and *PNGs
        files = []
        extensions = ('*.txt', '*.csv', '*.png')
        out_dirs = []
        if "createStructure" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        if "createTrajectory" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/tra_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        for out_dir in out_dirs:
            for extension in extensions:
                files.extend(glob.glob(out_dir + "/" + extension))
        tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        out_tar = tmp_dir + "/results.tar.gz"
        tar = tarfile.open(out_tar, "w:gz")
        for fil in files:
            logger.info("Packing %s into statistics TAR" % os.path.basename(fil))
            tar.add(fil, arcname=os.path.basename(fil))
        tar.close()
        # Set metadata required for TAR output file
        result = {}
        result["name"] = "summary"
        result["source_id"] = []
        if "sequence" in args.config['input_files']:
            result["source_id"].append(args.config['input_files']["sequence"])
        result["file_path"] = out_tar
        result["taxon_id"] = 0
        json_data['output_files'].append(result)
        # Write down output file metadata
        J = open(args.out_metadata, 'w')
        json.dump(json_data, J, indent=4)
        J.close()
        logger.info("Output files annotated into %s" % args.out_metadata)
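        # A quick sanity check one could add here (a sketch, not part of the
        # original workflow): list what actually went into the tarball.
        #   for name in tarfile.open(out_tar).getnames():
        #       logger.debug("TAR contains %s" % name)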
    # Delete temporary directory
    if "createStructure" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    if "createTrajectory" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

def main():
    # Start logging
    logger = logging.getLogger("lg")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s')
    handler = logging.FileHandler('%s.log' % os.path.splitext(os.path.basename(__file__))[0])
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.INFO)
    streamhandler.setFormatter(formatter)
    logger.addHandler(streamhandler)
    logger.info('Starting %s' % __file__)
    # Parse CMD
    parser = argparse.ArgumentParser(prog="chromatindyn_wf", description="Chromatin Dynamics workflow")
    parser.add_argument("--config", required=True, type=Mugparams.check_json, metavar="CONFIG_JSON", help="JSON file containing workflow parameters")
    parser.add_argument("--root_dir", required=True, type=Mugparams.readable_dir, metavar="ABS_PATH", help="Absolute path of the user data directory.")
    parser.add_argument("--public_dir", required=False, type=Mugparams.readable_dir, metavar="PUBLIC_PATH", help="Absolute path of the MuG public directory (with reference genome data, etc).")
    parser.add_argument("--metadata", required=True, type=Mugparams.check_json, metavar="METADATA_JSON", help="JSON file containing MuG metadata files")
    parser.add_argument("--out_metadata", required=False, type=Mugparams.writeable_file, metavar="RESULTS_JSON", help="JSON file containing results metadata")
    parser.add_argument("-v", "--verbose", required=False, action="store_true", help="increase output verbosity")
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    args = parser.parse_args()
    if args.verbose:
        # Handlers are attached above; only lower the thresholds here
        logger.setLevel(logging.DEBUG)
        handler.setLevel(logging.DEBUG)
        streamhandler.setLevel(logging.DEBUG)
        logger.debug("Verbose mode on")
    # Parse config
    Mugparams.process_arguments(args)
    Mugparams.process_metadata(args)
    # Print host info
    num_cores = multiprocessing.cpu_count()
    host = socket.gethostname()
    #mem = psutil.virtual_memory()
    logger.debug('HOST=%s CPUs=%s MEM=x' % (host, num_cores))
    # Run pipeline
    x_rnd = int(random.random() * 10000000)
    outfiles = run_pipeline(args, num_cores, x_rnd)
    # Results
    prepare_results(args, x_rnd)

if __name__ == '__main__':
    main()
out_dirs: for extension in extensions: files.extend(glob.glob(out_dir+\"/\"+extension)) tmp_dir = args.root_dir +", "= subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: tmp_dir", "args.config[\"arguments\"][\"operations\"]: ### chromatin_starting_trajectory_structure pdbMeta1 = {} # Set name. Should coincide with tool.json", "path of the user data directory.\") parser.add_argument(\"--public_dir\", required=False, type=Mugparams.readable_dir, metavar=\"PUBLIC_PATH\", help=\"Absolute path of", "tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir +\"/nucleR_to_3D_nucl_pos.txt\") print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip()", "# Delete temporary directory if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)", "x_rnd = int(random.random()*10000000) outfiles = run_pipeline(args, num_cores, x_rnd) # Results prepare_results(args, x_rnd) if", "the source_id) pdbMeta2[\"taxon_id\"] = 0 if pdbMeta2[\"source_id\"]: for file_id in pdbMeta2[\"source_id\"]: pprint.pprint(args.metadata) if", "CPUs=%s MEM=x' %(host,num_cores)) # Run pipeline x_rnd = int(random.random()*10000000) outfiles = run_pipeline(args, num_cores,", "%(levelname)s - %(message)s') handler = logging.FileHandler('%s.log' % os.path.splitext(os.path.basename(__file__))[0]) handler.setLevel(logging.INFO) handler.setFormatter(formatter) logger.addHandler(handler) streamhandler =", "logger.setLevel(logging.DEBUG) handler.setLevel(logging.DEBUG) handler.setLevel(logging.DEBUG) logger.addHandler(handler) streamhandler.setLevel(logging.DEBUG) logger.addHandler(streamhandler) logger.debug(\"Verbose mode on\") # Parse config Mugparams.process_arguments(args)", "etc).\") parser.add_argument(\"--metadata\", required=True, type=Mugparams.check_json, metavar=\"METADATA_JSON\", help=\"JSON file containing MuG metadata files\") parser.add_argument(\"--out_metadata\", required=False,", "%s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin 4\" % (gff_file,", "out_dir=\"\" class Mugparams(object): @staticmethod def check_json(json_file): logger = logging.getLogger(\"lg\") if not os.path.exists(json_file): raise", "bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout bashCommand = \"cd", "import os import sys import argparse import json import time import socket #", "if \"sequence\" in args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id # taxon_id is inherited from", "logger.exception(\"%s in not a valid json file.\" % json_file) return data @staticmethod def", "def readable_dir(d): if not os.path.isdir(d): raise Exception(\"readable_dir:{0} is not a directory path or", "= subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout def main(): # Start logging", "annotated into %s\" % args.out_metadata) # Delete temporary directory if \"createStructure\" in args.config[\"arguments\"][\"operations\"]:", "file (i.e the source_id) trajMeta[\"taxon_id\"] = 0 if trajMeta[\"source_id\"]: for file_id in trajMeta[\"source_id\"]:", "logging.getLogger(\"lg\") if not 
os.path.exists(json_file): raise argparse.ArgumentTypeError(\"%s does not exist\" % json_file) with open(json_file,'r')", "tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_dummy_str.pdb\" pdbMeta1[\"file_path\"]", "help=\"JSON file containing MuG metadata files\") parser.add_argument(\"--out_metadata\", required=False, type=Mugparams.writeable_file, metavar=\"RESULTS_JSON\", help=\"JSON file containing", "nucl_pos = args.root_dir + \"/\" + args.metadata[nucl_pos_file_id][\"file_path\"] tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand", "\"{0}/refGenomes/{1}/{1}.fa\".format(args.public_dir,assembly) bashCommand = \" /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s", "{} # Set name # Should coincide with tool.json pdbMeta[\"name\"] = \"PDB_chromatin_structure\" #", "nucl_pos_file_id = args.config[\"input_files\"][\"nuclPos\"] nucl_pos = args.root_dir + \"/\" + args.metadata[nucl_pos_file_id][\"file_path\"] tmp_dir = \"{0}/{1}/tra_{2}\".format(args.root_dir,", "else: raise Exception(\"writeable_file:{0} is not a writeable dir\".format(d)) else: return f @staticmethod def", "[] if \"sequence\" in args.config['input_files']: pdbMeta1[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. taxon_id is inherited from", "process.communicate()[0].strip() print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/tra_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand", "(idx, d) in enumerate(args.config[\"arguments\"]) if d[\"name\"] == \"project\") out_dir = args.root_dir+\"/\"+args.config[\"arguments\"][proj_idx][\"value\"] logger.info(\"Output file", "root_dir? tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] traj_file = tmp_dir + \"/chromdyn_str.dcd\"", "args = parser.parse_args() if args.verbose: logger.setLevel(logging.DEBUG) handler.setLevel(logging.DEBUG) handler.setLevel(logging.DEBUG) logger.addHandler(handler) streamhandler.setLevel(logging.DEBUG) logger.addHandler(streamhandler) logger.debug(\"Verbose mode", "parser.add_argument(\"--config\", required=True, type=Mugparams.check_json, metavar=\"CONFIG_JSON\", help=\"JSON file containing workflow parameters\") parser.add_argument(\"--root_dir\", required=True, type=Mugparams.readable_dir, metavar=\"ABS_PATH\",", "J.close logger.info(\"Output files annotated into %s\" % args.out_metadata) # Delete temporary directory if", "bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout if \"create3DfromNucleaR\" in", "[] if \"sequence\" in args.config['input_files']: pdbMeta2[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. 
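
# Illustrative sketch (values invented, not a real MuG configuration): process_arguments()
# flattens the VRE-style lists of {"name": ..., "value": ...} pairs into plain dicts, so
# the rest of the script can write args.config["arguments"]["project"] directly.
#
#   config = {"arguments":   [{"name": "project", "value": "run_001"},
#                             {"name": "operations", "value": ["createTrajectory"]}],
#             "input_files": [{"name": "sequence", "value": "file_id_1"}]}
#
#   after Mugparams.process_arguments(args):
#       args.config["arguments"]   == {"project": "run_001", "operations": ["createTrajectory"]}
#       args.config["input_files"] == {"sequence": "file_id_1"}
#
# process_metadata() does the analogous reindexing for --metadata, keying each file's
# metadata dict by its "_id" so lookups like args.metadata[file_id] work.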

#
# Executing pipeline
# Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess
def run_pipeline(args, num_cores, x_rnd):

    # Build the equilibrium chromatin structure from the input sequence and
    # nucleosome positions
    sequence_file_id = args.config["input_files"]["sequence"]
    sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
    nucl_pos_file_id = args.config["input_files"]["nuclPos"]
    nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
    tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
    bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (nucl_pos, sequence, tmp_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout
    # Copy the resulting PDB into the project directory
    usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
    bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout

    if "createTrajectory" in args.config["arguments"]["operations"]:
        print "do Trajectory"
        sequence_file_id = args.config["input_files"]["sequence"]
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        iterations = args.config["arguments"]["createTrajectory:numStruct"]
        bashCommand = "cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s" % (nucl_pos, sequence, iterations, tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        # Copy the starting structure, trajectory and dummy structure into the project directory
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s" % (tmp_dir, tmp_dir, tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        print "do 3D from NucleaR"
        gff_file_id = args.config["input_files"]["gffNucleaR"]
        gff_file = args.root_dir + "/" + args.metadata[gff_file_id]["file_path"]
        gen_reg = args.config["arguments"]["create3DfromNucleaR:genRegion"]
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = " mkdir %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        # Extract the sequence and nucleosome calls for the requested genomic region
        assembly = args.metadata[gff_file_id]["meta_data"]["assembly"]
        genome_file = "{0}/refGenomes/{1}/{1}.fa".format(args.public_dir, assembly)
        bashCommand = " /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin 4" % (gff_file, genome_file, gen_reg, tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir + "/nucleR_to_3D_nucl_pos.txt")
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (tmp_dir + "/nucleR_to_3D_nucl_pos.txt", tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

    return 1
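
# A possible refactoring, sketched here only (run_cmd is a hypothetical helper, not part
# of the original tool): the echo/Popen/communicate pattern repeated above and below
# could be collapsed into a single function.
def run_cmd(bashCommand):
    # Echo the command, run it through a shell, and return its stripped stdout
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout
    return proc_stdout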

#
# Prepare metadata for the output files
def prepare_results(args, x_rnd):
    global out_dir
    logger = logging.getLogger("lg")

    if args.out_metadata:
        # Create out_metadata JSON
        json_data = {}
        json_data['output_files'] = []

        if ("createStructure" in args.config["arguments"]["operations"]) or ("create3DfromNucleaR" in args.config["arguments"]["operations"]):
            ### PDB_chromatin_structure
            pdbMeta = {}
            # Set name. Should coincide with tool.json
            pdbMeta["name"] = "PDB_chromatin_structure"
            # Set file_path. Absolute path. Should it be relative to root_dir instead?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_str.pdb"
            pdbMeta["file_path"] = pdb_file
            # Set source_id & taxon_id. taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta["source_id"].append(args.config['input_files']["sequence"])
            pdbMeta["taxon_id"] = 0
            if pdbMeta["source_id"]:
                for file_id in pdbMeta["source_id"]:
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta)

        if "createTrajectory" in args.config["arguments"]["operations"]:
            ### chromatin_starting_trajectory_structure
            pdbMeta1 = {}
            # Set name. Should coincide with tool.json
            pdbMeta1["name"] = "PDB_chromatin_starting_structure"
            # Set file_path. Absolute path. Should it be relative to root_dir instead?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_start_str.pdb"
            pdbMeta1["file_path"] = pdb_file
            # Set source_id & taxon_id. taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta1["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta1["source_id"].append(args.config['input_files']["sequence"])
            pdbMeta1["taxon_id"] = 0
            if pdbMeta1["source_id"]:
                for file_id in pdbMeta1["source_id"]:
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta1["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta1)

            ### chromatin_dummy_trajectory_structure
            pdbMeta2 = {}
            # Set name. Should coincide with tool.json
            pdbMeta2["name"] = "PDB_dummy_chromatin_structure"
            # Set file_path. Absolute path. Should it be relative to root_dir instead?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            pdb_file = tmp_dir + "/chromdyn_dummy_str.pdb"
            pdbMeta2["file_path"] = pdb_file
            # Set source_id & taxon_id. taxon_id is inherited from the input file (i.e. the source_id)
            pdbMeta2["source_id"] = []
            if "sequence" in args.config['input_files']:
                pdbMeta2["source_id"].append(args.config['input_files']["sequence"])
            pdbMeta2["taxon_id"] = 0
            if pdbMeta2["source_id"]:
                for file_id in pdbMeta2["source_id"]:
                    if args.metadata[file_id]["taxon_id"]:
                        pdbMeta2["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(pdbMeta2)

            ### chromatin_trajectory
            trajMeta = {}
            # Set name. Should coincide with tool.json
            trajMeta["name"] = "chromatin_trajectory"
            # Set file_path. Absolute path. Should it be relative to root_dir instead?
            tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
            traj_file = tmp_dir + "/chromdyn_str.dcd"
            trajMeta["file_path"] = traj_file
            # Set source_id & taxon_id. taxon_id is inherited from the input file (i.e. the source_id)
            trajMeta["source_id"] = []
            if "sequence" in args.config['input_files']:
                trajMeta["source_id"].append(args.config['input_files']["sequence"])
            trajMeta["taxon_id"] = 0
            if trajMeta["source_id"]:
                for file_id in trajMeta["source_id"]:
                    if args.metadata[file_id]["taxon_id"]:
                        trajMeta["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            # Append output_file metadata into JSON data
            json_data['output_files'].append(trajMeta)

        # Prepare last output file: TAR of outputs, *CSVs and *PNGs
        files = []
        extensions = ('*.txt', '*.csv', '*.png')
        out_dirs = []
        if "createStructure" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        if "createTrajectory" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/tra_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
            out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        for out_dir in out_dirs:
            for extension in extensions:
                files.extend(glob.glob(out_dir + "/" + extension))
        tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        out_tar = tmp_dir + "/results.tar.gz"
        tar = tarfile.open(out_tar, "w:gz")
        for fil in files:
            logger.info("Packing %s into statistics TAR" % os.path.basename(fil))
            tar.add(fil, arcname=os.path.basename(fil))
        tar.close()

        # Set metadata required for TAR output file
        result = {}
        result["name"] = "summary"
        result["source_id"] = []
        if "sequence" in args.config['input_files']:
            result["source_id"].append(args.config['input_files']["sequence"])
        result["file_path"] = out_tar
        result["taxon_id"] = 0
        json_data['output_files'].append(result)

        # Write down output file metadata
        J = open(args.out_metadata, 'w')
        json.dump(json_data, J, indent=4)
        J.close()
        logger.info("Output files annotated into %s" % args.out_metadata)

    # Delete temporary directories
    if "createStructure" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    if "createTrajectory" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = "rm -r %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

def main():

    # Start logging
    logger = logging.getLogger("lg")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s')
    handler = logging.FileHandler('%s.log' % os.path.splitext(os.path.basename(__file__))[0])
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.INFO)
    streamhandler.setFormatter(formatter)
    logger.addHandler(streamhandler)
    logger.info('Starting %s' % __file__)

    # Parse CMD
    parser = argparse.ArgumentParser(prog="chromatindyn_wf", description="Chromatin Dynamics workflow")
    parser.add_argument("--config", required=True, type=Mugparams.check_json,
                        metavar="CONFIG_JSON", help="JSON file containing workflow parameters")
    parser.add_argument("--root_dir", required=True, type=Mugparams.readable_dir,
                        metavar="ABS_PATH", help="Absolute path of the user data directory.")
    parser.add_argument("--public_dir", required=False, type=Mugparams.readable_dir,
                        metavar="PUBLIC_PATH", help="Absolute path of the MuG public directory (with reference genome data, etc).")
    parser.add_argument("--metadata", required=True, type=Mugparams.check_json,
                        metavar="METADATA_JSON", help="JSON file containing MuG metadata files")
    parser.add_argument("--out_metadata", required=False, type=Mugparams.writeable_file,
                        metavar="RESULTS_JSON", help="JSON file containing results metadata")
    parser.add_argument("-v", "--verbose", required=False, action="store_true",
                        help="increase output verbosity")
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    args = parser.parse_args()

    if args.verbose:
        # Handlers are already attached above; only the levels need raising
        logger.setLevel(logging.DEBUG)
        handler.setLevel(logging.DEBUG)
        streamhandler.setLevel(logging.DEBUG)
        logger.debug("Verbose mode on")

    # Parse config
    Mugparams.process_arguments(args)
    Mugparams.process_metadata(args)

    # Print host info
    num_cores = multiprocessing.cpu_count()
    host = socket.gethostname()
    #mem = psutil.virtual_memory()
    logger.debug('HOST=%s CPUs=%s MEM=x' % (host, num_cores))

    # Run pipeline
    x_rnd = int(random.random() * 10000000)
    outfiles = run_pipeline(args, num_cores, x_rnd)

    # Results
    prepare_results(args, x_rnd)


if __name__ == '__main__':
    main()
Should coincide with tool.json pdbMeta1[\"name\"] = \"PDB_chromatin_starting_structure\" # Set file_path.", "parser.add_argument(\"-v\", \"--verbose\", required=False, action=\"store_true\", help=\"increase output verbosity\") parser.add_argument('--version', action='version', version='%(prog)s 0.1') args =", "int(random.random()*10000000) outfiles = run_pipeline(args, num_cores, x_rnd) # Results prepare_results(args, x_rnd) if __name__ ==", "Set source_id & taxon_id pdbMeta[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) #", "proc_stdout = process.communicate()[0].strip() print proc_stdout def main(): # Start logging logger = logging.getLogger(\"lg\")", "args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"rm -r %s\" % tmp_dir print bashCommand process =", "logger.debug(\"Verbose mode on\") # Parse config Mugparams.process_arguments(args) Mugparams.process_metadata(args) # Print host info num_cores", "args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/tra_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) for out_dir", "sys import argparse import json import time import socket # print localhost import", "bashCommand = \"cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s\" % (tmp_dir, tmp_dir, tmp_dir, usr_dir) print", "+\"/nucleR_to_3D_nucl_pos.txt\", tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip()", "prepare_results(args, x_rnd): global out_dir logger = logging.getLogger(\"lg\") if (args.out_metadata): # Create out_metadata JSON", "= out_tar result[\"taxon_id\"] = 0 json_data['output_files'].append(result) # Write down output file metadata J", "of allow_multiple) inputs_by_name = {} for index,d in enumerate(args.config[\"input_files\"]): name = args.config[\"input_files\"][index][\"name\"] if", "(project) proj_idx = next(idx for (idx, d) in enumerate(args.config[\"arguments\"]) if d[\"name\"] == \"project\")", "= \"PDB_dummy_chromatin_structure\" # Set file_path. Absolute path. Should be better relative to root_dir?", "software in a subprocess def run_pipeline(args, num_cores, x_rnd): sort = args.config[\"input_files\"][\"sequence\"] sequence =", "a existing directory path, or not accessible\".format(d)) else: if os.access(d, os.W_OK): return f", "source_id & taxon_id pdbMeta1[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: pdbMeta1[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set", "pdbMeta1[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. 
taxon_id is inherited from the input file (i.e the", "pprint.pformat(args.config)) return 1 @staticmethod def process_metadata(args): global out_dir logger = logging.getLogger(\"lg\") # Indexing", "Set name # Should coincide with tool.json pdbMeta[\"name\"] = \"PDB_chromatin_structure\" # Set file_path", "args.metadata[file_id][\"taxon_id\"]: pdbMeta1[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta1)", "if pdbMeta2[\"source_id\"]: for file_id in pdbMeta2[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta2[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break", "args.config[\"arguments\"][\"operations\"]) or (\"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]): ### PDB_chromatin_structure pdbMeta = {} # Set name", "shutil import glob import tarfile import subprocess import random out_dir=\"\" class Mugparams(object): @staticmethod", "for index,d in enumerate(args.config[\"input_files\"]): name = args.config[\"input_files\"][index][\"name\"] if name in inputs_by_name: pprint.pprint(inputs_by_name[name]) if", "result[\"source_id\"].append(args.config['input_files'][\"sequence\"]) result[\"file_path\"] = out_tar result[\"taxon_id\"] = 0 json_data['output_files'].append(result) # Write down output file", "\"/\" + args.metadata[sequence_file_id][\"file_path\"] nucl_pos_file_id = args.config[\"input_files\"][\"nuclPos\"] nucl_pos = args.root_dir + \"/\" + args.metadata[nucl_pos_file_id][\"file_path\"]", "if args.metadata[file_id][\"taxon_id\"]: pdbMeta1[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into JSON data", "available memory import subprocess import shutil import glob import tarfile import subprocess import", "bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout return 1 #", "\"sequence\" in args.config['input_files']: pdbMeta1[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. taxon_id is inherited from the input", "%s into statistics TAR\" % os.path.basename(fil)) tar.add(fil, arcname=os.path.basename(fil)) tar.close() # Set metadata required", "Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta) if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: ### chromatin_starting_trajectory_structure", "out_dir in out_dirs: for extension in extensions: files.extend(glob.glob(out_dir+\"/\"+extension)) tmp_dir = args.root_dir + \"/\"", "### chromatin_dummy_trajectory_structure pdbMeta2 = {} # Set name. Should coincide with tool.json pdbMeta2[\"name\"]", "(i.e the source_id) pdbMeta[\"taxon_id\"] = 0 if pdbMeta[\"source_id\"]: for file_id in pdbMeta[\"source_id\"]: pprint.pprint(args.metadata)", "if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: ### chromatin_starting_trajectory_structure pdbMeta1 = {} # Set name. 
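
# Illustrative sketch (editorial addition, never called by the workflow): the
# minimal shapes of the VRE "config" and "metadata" JSON documents that
# process_arguments() and process_metadata() expect. All ids, paths and values
# below are hypothetical placeholders, not files shipped with this repository.
def _example_vre_json_shapes():
    config = {
        "arguments": [
            {"name": "project", "value": "run_0001"},
            {"name": "operations", "value": ["createStructure", "createTrajectory"]},
            {"name": "createTrajectory:numStruct", "value": 100},
        ],
        "input_files": [
            {"name": "sequence", "value": "hypothetical_file_id_1"},
            {"name": "nuclPos", "value": "hypothetical_file_id_2"},
        ],
    }
    # process_metadata() indexes a list like this into a dict keyed by "_id",
    # which is how run_pipeline() resolves args.metadata[file_id]["file_path"]
    metadata = [
        {"_id": "hypothetical_file_id_1", "file_path": "uploads/seq.fa", "taxon_id": 4932},
        {"_id": "hypothetical_file_id_2", "file_path": "uploads/nucl_pos.txt", "taxon_id": 4932},
    ]
    return config, metadata
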
#
# Executing pipeline
# Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess

def run_pipeline(args, num_cores, x_rnd):

    sequence_file_id = args.config["input_files"]["sequence"]
    sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
    nucl_pos_file_id = args.config["input_files"]["nuclPos"]
    nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
    tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
    bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (nucl_pos, sequence, tmp_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout
    usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
    bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
    print bashCommand
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print proc_stdout

    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        print "do 3D from NucleaR"
        gff_file_id = args.config["input_files"]["gffNucleaR"]
        gff_file = args.root_dir + "/" + args.metadata[gff_file_id]["file_path"]
        gen_reg = args.config["arguments"]["create3DfromNucleaR:genRegion"]
        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        bashCommand = " mkdir %s" % tmp_dir
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        assembly = args.metadata[gff_file_id]["meta_data"]["assembly"]
        genome_file = "{0}/refGenomes/{1}/{1}.fa".format(args.public_dir, assembly)
        bashCommand = " /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin 4" % (gff_file, genome_file, gen_reg, tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir + "/nucleR_to_3D_nucl_pos.txt")
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        bashCommand = "cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s" % (tmp_dir + "/nucleR_to_3D_nucl_pos.txt", tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

    if "createTrajectory" in args.config["arguments"]["operations"]:
        print "do Trajectory"
        sequence_file_id = args.config["input_files"]["sequence"]
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]
        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        iterations = args.config["arguments"]["createTrajectory:numStruct"]
        bashCommand = "cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s" % (nucl_pos, sequence, iterations, tmp_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        bashCommand = "cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s" % (tmp_dir, tmp_dir, tmp_dir, usr_dir)
        print bashCommand
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print proc_stdout

    return 1
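
# Design note (editorial sketch, not used by run_pipeline above): building the
# commands as strings and running them with shell=True breaks on paths that
# contain spaces or shell metacharacters. An equivalent call can pass an
# argument list plus cwd= and drop the shell; run.sh and its arguments are the
# same placeholders used above.
def _example_safer_subprocess(nucl_pos, sequence, tmp_dir):
    cmd = ["bash", "run.sh", nucl_pos, sequence, tmp_dir]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               cwd="/home/MuG/MuG_Chromatin_equ_structure/src_test")
    return process.communicate()[0].strip()
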
taxon_id is inherited from the", "in pdbMeta1[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta1[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata", "unique - because of allow_multiple) inputs_by_name = {} for index,d in enumerate(args.config[\"input_files\"]): name", "directory if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"rm", "out_dirs = [] if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) if \"createTrajectory\" in", "J = open(args.out_metadata, 'wb') json.dump(json_data,J, indent=4) J.close logger.info(\"Output files annotated into %s\" %", "Set name # Should coincide with tool.json trajMeta[\"name\"] = \"chromatin_trajectory\" # Set file_path", "Parse CMD parser = argparse.ArgumentParser(prog=\"chromatindyn_wf\", description=\"Chromatin Dynamics workflow\") parser.add_argument(\"--config\", required=True, type=Mugparams.check_json, metavar=\"CONFIG_JSON\", help=\"JSON", "Indexing config input_files by name (name could not be unique - because of", "json_data['output_files'].append(pdbMeta) if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: ### chromatin_starting_trajectory_structure pdbMeta1 = {} # Set name.", "= metadata_by_id logger.debug(\"VRE metadata for input_files is:\\n %s \" % pprint.pformat(args.metadata)) return 1", "(tmp_dir, tmp_dir, tmp_dir, usr_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip()", "logger.addHandler(streamhandler) logger.info('Starting %s' % __file__) # Parse CMD parser = argparse.ArgumentParser(prog=\"chromatindyn_wf\", description=\"Chromatin Dynamics", "proc_stdout if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: print \"do 3D from NucleaR\" gff_file_id = args.config[\"input_files\"][\"gffNucleaR\"]", "= 0 json_data['output_files'].append(result) # Write down output file metadata J = open(args.out_metadata, 'wb')", "dict(d)) for (index, d) in enumerate(args.metadata)) args.metadata = metadata_by_id logger.debug(\"VRE metadata for input_files", "Set file_path. Absolute path. Should be better relative to root_dir? 
tmp_dir = args.root_dir", "% (nucl_pos, sequence, tmp_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip()", "pdb_file # Set source_id & taxon_id pdbMeta1[\"source_id\"] = [] if \"sequence\" in args.config['input_files']:", "for fil in files: logger.info (\"Packing %s into statistics TAR\" % os.path.basename(fil)) tar.add(fil,", "# Write down output file metadata J = open(args.out_metadata, 'wb') json.dump(json_data,J, indent=4) J.close", "%s %s\" % (tmp_dir +\"/nucleR_to_3D_nucl_pos.txt\", tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE,", "TAR output file result = {} result[\"name\"] = \"summary\" result[\"source_id\"] = [] if", "# TODO Fails if relative path given if not os.path.isdir(d): raise Exception(\"writeable_file:{0} not", "args.config[\"arguments\"][\"createTrajectory:numStruct\"] bashCommand = \"cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s\" % (nucl_pos,", "reference genome data, etc).\") parser.add_argument(\"--metadata\", required=True, type=Mugparams.check_json, metavar=\"METADATA_JSON\", help=\"JSON file containing MuG metadata", "\"sequence\" in args.config['input_files']: result[\"source_id\"].append(args.config['input_files'][\"sequence\"]) result[\"file_path\"] = out_tar result[\"taxon_id\"] = 0 json_data['output_files'].append(result) # Write", "of the MuG public directory (with reference genome data, etc).\") parser.add_argument(\"--metadata\", required=True, type=Mugparams.check_json,", "= pdb_file # Set source_id & taxon_id pdbMeta[\"source_id\"] = [] if \"sequence\" in", "# Absolute path. Should be better relative to root_dir? tmp_dir = args.root_dir +", "\"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: print \"do 3D from NucleaR\" gff_file_id = args.config[\"input_files\"][\"gffNucleaR\"] gff_file =", "\" /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range %s --seq_output %s --nucs_output %s --margin", "shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: print \"do 3D", "metavar=\"ABS_PATH\", help=\"Absolute path of the user data directory.\") parser.add_argument(\"--public_dir\", required=False, type=Mugparams.readable_dir, metavar=\"PUBLIC_PATH\", help=\"Absolute", "<reponame>inab/openEBench_vre #!/usr/bin/python2.7 import os import sys import argparse import json import time import", "in out_dirs: for extension in extensions: files.extend(glob.glob(out_dir+\"/\"+extension)) tmp_dir = args.root_dir + \"/\" +", "d[\"value\"]) for (index, d) in enumerate(args.config[\"arguments\"])) args.config[\"arguments\"] = arguments_by_name # Indexing config input_files", "= args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta) if \"createTrajectory\"", "+ args.metadata[gff_file_id][\"file_path\"] gen_reg = args.config[\"arguments\"][\"create3DfromNucleaR:genRegion\"] tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"", "+ \"/\" + args.metadata[sequence_file_id][\"file_path\"] nucl_pos_file_id = args.config[\"input_files\"][\"nuclPos\"] nucl_pos = args.root_dir + \"/\" +", "tool.json pdbMeta[\"name\"] = \"PDB_chromatin_structure\" # Set file_path # Absolute path. 
Should be better", "% (tmp_dir +\"/nucleR_to_3D_nucl_pos.txt\", tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout", "= \"cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s\" % (nucl_pos, sequence, iterations,", "root_dir? tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_start_str.pdb\"", "args.config[\"arguments\"][\"project\"] bashCommand = \"cp %s/output/chromdyn_str.pdb %s\" % (tmp_dir, usr_dir) print bashCommand process =", "fil in files: logger.info (\"Packing %s into statistics TAR\" % os.path.basename(fil)) tar.add(fil, arcname=os.path.basename(fil))", "a directory path or is not accessible\".format(d)) if os.access(d, os.R_OK): return d else:", "\"cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s\" % (nucl_pos, sequence, tmp_dir) print bashCommand", "from NucleaR\" gff_file_id = args.config[\"input_files\"][\"gffNucleaR\"] gff_file = args.root_dir + \"/\" + args.metadata[gff_file_id][\"file_path\"] gen_reg", "args.metadata[gff_file_id][\"meta_data\"][\"assembly\"] genome_file = \"{0}/refGenomes/{1}/{1}.fa\".format(args.public_dir,assembly) bashCommand = \" /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s --range", "%s/output/chromdyn_str.pdb %s\" % (tmp_dir, usr_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout =", "\"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_str.pdb\" pdbMeta[\"file_path\"] = pdb_file # Set", "import subprocess import shutil import glob import tarfile import subprocess import random out_dir=\"\"", "JSON data json_data['output_files'].append(trajMeta) # Prepare last output file: TAR of outputs, *CSVs and", "subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: tmp_dir =", "print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout usr_dir =", "genome_file, gen_reg, tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir +\"/nucleR_to_3D_nucl_pos.txt\") print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout", "NucleaR\" gff_file_id = args.config[\"input_files\"][\"gffNucleaR\"] gff_file = args.root_dir + \"/\" + args.metadata[gff_file_id][\"file_path\"] gen_reg =", "in args.config[\"arguments\"][\"operations\"]): ### PDB_chromatin_structure pdbMeta = {} # Set name # Should coincide", "taxon_id trajMeta[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: trajMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. taxon_id", "= {} # Set name. 
Should coincide with tool.json pdbMeta1[\"name\"] = \"PDB_chromatin_starting_structure\" #", "d[\"name\"] == \"project\") out_dir = args.root_dir+\"/\"+args.config[\"arguments\"][proj_idx][\"value\"] logger.info(\"Output file directory set to %s\" %", "\"/\" + args.config[\"arguments\"][\"project\"] bashCommand = \"cp %s/output/chromdyn_str.pdb %s\" % (tmp_dir, usr_dir) print bashCommand", "= next(idx for (idx, d) in enumerate(args.config[\"arguments\"]) if d[\"name\"] == \"project\") out_dir =", "= inputs_by_name[name] inputs_by_name[name]= list() inputs_by_name[name].append(prev) inputs_by_name[name].append(d[\"value\"]) else: inputs_by_name[name]=d[\"value\"] args.config[\"input_files\"] = inputs_by_name logger.debug(\"Configuration file", "bashCommand = \" mkdir %s\" % tmp_dir print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)", "if args.metadata[file_id][\"taxon_id\"]: pdbMeta2[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into JSON data", "readable dir\".format(d)) @staticmethod def writeable_file(f): if not os.path.isfile(f): d = os.path.dirname(f) # TODO", "path given if not os.path.isdir(d): raise Exception(\"writeable_file:{0} not in a existing directory path,", "into JSON data json_data['output_files'].append(pdbMeta) if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: ### chromatin_starting_trajectory_structure pdbMeta1 = {}", "= psutil.virtual_memory() logger.debug('HOST=%s CPUs=%s MEM=x' %(host,num_cores)) # Run pipeline x_rnd = int(random.random()*10000000) outfiles", "bashCommand = \"rm -r %s\" % tmp_dir print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True)", "\" mkdir %s\" % tmp_dir print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout =", "--seq_output %s --nucs_output %s --margin 4\" % (gff_file, genome_file, gen_reg, tmp_dir +\"/nucleR_to_3D_seq.txt\", tmp_dir", "\"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_start_str.pdb\" pdbMeta1[\"file_path\"] = pdb_file # Set", "pdbMeta[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into", "proc_stdout bashCommand = \"cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s\" % (tmp_dir +\"/nucleR_to_3D_nucl_pos.txt\",", "in args.config[\"arguments\"][\"operations\"]: tmp_dir = \"{0}/{1}/tra_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"rm -r %s\" %", "writeable dir\".format(d)) else: return f @staticmethod def process_arguments(args): global out_dir logger = logging.getLogger(\"lg\")", "%(message)s') handler = logging.FileHandler('%s.log' % os.path.splitext(os.path.basename(__file__))[0]) handler.setLevel(logging.INFO) handler.setFormatter(formatter) logger.addHandler(handler) streamhandler = logging.StreamHandler() streamhandler.setLevel(logging.INFO)", "+ \"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_start_str.pdb\" pdbMeta1[\"file_path\"] = pdb_file #", "break # Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta2) ### chromatin_trajectory trajMeta =", "# Should coincide with tool.json pdbMeta[\"name\"] = \"PDB_chromatin_structure\" # Set file_path # Absolute", "parser.add_argument(\"--out_metadata\", required=False, 
type=Mugparams.writeable_file, metavar=\"RESULTS_JSON\", help=\"JSON file containing results metadata\") parser.add_argument(\"-v\", \"--verbose\", required=False, action=\"store_true\",", "% tmp_dir print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout", "+ args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_start_str.pdb\" pdbMeta1[\"file_path\"] = pdb_file # Set source_id", "+ \"/\" + args.config[\"arguments\"][\"project\"] bashCommand = \"cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s\" % (tmp_dir,", "def process_arguments(args): global out_dir logger = logging.getLogger(\"lg\") # Setting working directory (project) proj_idx", "args.config[\"arguments\"][\"project\"], x_rnd)) if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) for out_dir in out_dirs:", "logger.debug('HOST=%s CPUs=%s MEM=x' %(host,num_cores)) # Run pipeline x_rnd = int(random.random()*10000000) outfiles = run_pipeline(args,", "+ \"/results.tar.gz\" tar = tarfile.open(out_tar, \"w:gz\") for fil in files: logger.info (\"Packing %s", "inputs_by_name[name].append(prev) inputs_by_name[name].append(d[\"value\"]) else: inputs_by_name[name]=d[\"value\"] args.config[\"input_files\"] = inputs_by_name logger.debug(\"Configuration file arguments and input_files are:\\n", "logger = logging.getLogger(\"lg\") # Setting working directory (project) proj_idx = next(idx for (idx,", "%s %s %s\" % (nucl_pos, sequence, iterations, tmp_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE,", "= \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \" mkdir %s\" % tmp_dir print bashCommand", "writeable_file(f): if not os.path.isfile(f): d = os.path.dirname(f) # TODO Fails if relative path", "process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]:", "#!/usr/bin/python2.7 import os import sys import argparse import json import time import socket", "name = args.config[\"input_files\"][index][\"name\"] if name in inputs_by_name: pprint.pprint(inputs_by_name[name]) if type(inputs_by_name[name] is str): prev", "accessible\".format(d)) if os.access(d, os.R_OK): return d else: raise Exception(\"readable_dir:{0} is not a readable", "& taxon_id pdbMeta2[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: pdbMeta2[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id.", "if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) if \"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/tra_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"],", "= 0 if pdbMeta1[\"source_id\"]: for file_id in pdbMeta1[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta1[\"taxon_id\"] =", "for (index, d) in enumerate(args.metadata)) args.metadata = metadata_by_id logger.debug(\"VRE metadata for input_files is:\\n", "args.config[\"input_files\"][\"gffNucleaR\"] 
gff_file = args.root_dir + \"/\" + args.metadata[gff_file_id][\"file_path\"] gen_reg = args.config[\"arguments\"][\"create3DfromNucleaR:genRegion\"] tmp_dir =", "raise argparse.ArgumentTypeError(\"%s does not exist\" % json_file) with open(json_file,'r') as file_data: try: data", "Mugparams.process_metadata(args) # Print host info num_cores = multiprocessing.cpu_count() host = socket.gethostname() #mem =", "args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id # taxon_id is inherited from the input file", "TAR\" % os.path.basename(fil)) tar.add(fil, arcname=os.path.basename(fil)) tar.close() # Set metadata required for TAR output", "(i.e the source_id) pdbMeta1[\"taxon_id\"] = 0 if pdbMeta1[\"source_id\"]: for file_id in pdbMeta1[\"source_id\"]: pprint.pprint(args.metadata)", "root_dir? tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] pdb_file = tmp_dir + \"/chromdyn_str.pdb\"", "socket.gethostname() #mem = psutil.virtual_memory() logger.debug('HOST=%s CPUs=%s MEM=x' %(host,num_cores)) # Run pipeline x_rnd =", "% os.path.basename(fil)) tar.add(fil, arcname=os.path.basename(fil)) tar.close() # Set metadata required for TAR output file", "= \"cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s\" % (tmp_dir, tmp_dir, tmp_dir, usr_dir) print bashCommand", "JSON json_data = {} json_data['output_files']= [] if (\"createStructure\" in args.config[\"arguments\"][\"operations\"]) or (\"create3DfromNucleaR\" in", "# Set taxon_id. taxon_id is inherited from the input file (i.e the source_id)", "= args.root_dir + \"/\" + args.metadata[nucl_pos_file_id][\"file_path\"] tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand =", "file_id in pdbMeta[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file", "Start logging logger = logging.getLogger(\"lg\") logger.setLevel(logging.INFO) formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s", "# Prepare metadata for the output files def prepare_results(args, x_rnd): global out_dir logger", "Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta2) ### chromatin_trajectory trajMeta = {} #", "if \"sequence\" in args.config['input_files']: pdbMeta2[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. taxon_id is inherited from the", "process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout assembly = args.metadata[gff_file_id][\"meta_data\"][\"assembly\"] genome_file", "= traj_file # Set source_id & taxon_id trajMeta[\"source_id\"] = [] if \"sequence\" in", "containing results metadata\") parser.add_argument(\"-v\", \"--verbose\", required=False, action=\"store_true\", help=\"increase output verbosity\") parser.add_argument('--version', action='version', version='%(prog)s", "%s\" % args.out_metadata) # Delete temporary directory if \"createStructure\" in args.config[\"arguments\"][\"operations\"]: tmp_dir =", "pdbMeta[\"taxon_id\"] = args.metadata[file_id][\"taxon_id\"] break # Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta) if", "### chromatin_starting_trajectory_structure pdbMeta1 = {} # Set name. 
Should coincide with tool.json pdbMeta1[\"name\"]", "out_tar = tmp_dir + \"/results.tar.gz\" tar = tarfile.open(out_tar, \"w:gz\") for fil in files:", "% (tmp_dir, tmp_dir, tmp_dir, usr_dir) print bashCommand process = subprocess.Popen(bashCommand,stdout=subprocess.PIPE, shell=True) proc_stdout =", "& taxon_id pdbMeta1[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: pdbMeta1[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id.", "# Set name. Should coincide with tool.json pdbMeta2[\"name\"] = \"PDB_dummy_chromatin_structure\" # Set file_path.", "= [] if \"sequence\" in args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id # taxon_id is", "Exception(\"writeable_file:{0} not in a existing directory path, or not accessible\".format(d)) else: if os.access(d,", "process_arguments(args): global out_dir logger = logging.getLogger(\"lg\") # Setting working directory (project) proj_idx =", "= \"PDB_chromatin_structure\" # Set file_path # Absolute path. Should be better relative to", "json_data['output_files'].append(pdbMeta1) ### chromatin_dummy_trajectory_structure pdbMeta2 = {} # Set name. Should coincide with tool.json", "Set taxon_id. taxon_id is inherited from the input file (i.e the source_id) pdbMeta2[\"taxon_id\"]", "json_data['output_files'].append(trajMeta) # Prepare last output file: TAR of outputs, *CSVs and *PNGs files", "return data @staticmethod def readable_dir(d): if not os.path.isdir(d): raise Exception(\"readable_dir:{0} is not a", "path. Should be better relative to root_dir? tmp_dir = args.root_dir + \"/\" +", "nucl_pos_file_id = args.config[\"input_files\"][\"nuclPos\"] nucl_pos = args.root_dir + \"/\" + args.metadata[nucl_pos_file_id][\"file_path\"] tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir,", "\"createTrajectory\" in args.config[\"arguments\"][\"operations\"]: print \"do Trajectory\" sequence_file_id = args.config[\"input_files\"][\"sequence\"] sequence = args.root_dir +", "Create out_metadata JSON json_data = {} json_data['output_files']= [] if (\"createStructure\" in args.config[\"arguments\"][\"operations\"]) or", "os.path.basename(fil)) tar.add(fil, arcname=os.path.basename(fil)) tar.close() # Set metadata required for TAR output file result", "enumerate(args.metadata)) args.metadata = metadata_by_id logger.debug(\"VRE metadata for input_files is:\\n %s \" % pprint.pformat(args.metadata))", "# Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta2) ### chromatin_trajectory trajMeta = {}", "in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/tra_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) if \"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]: out_dirs.append(\"{0}/{1}/str_{2}/output\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd)) for", "tmp_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] traj_file = tmp_dir + \"/chromdyn_str.dcd\" trajMeta[\"file_path\"]", "required=True, type=Mugparams.check_json, metavar=\"METADATA_JSON\", help=\"JSON file containing MuG metadata files\") parser.add_argument(\"--out_metadata\", required=False, type=Mugparams.writeable_file, metavar=\"RESULTS_JSON\",", "pprint.pprint(inputs_by_name[name]) if type(inputs_by_name[name] is str): prev = inputs_by_name[name] inputs_by_name[name]= list() inputs_by_name[name].append(prev) inputs_by_name[name].append(d[\"value\"]) else:", 
"args.metadata = metadata_by_id logger.debug(\"VRE metadata for input_files is:\\n %s \" % pprint.pformat(args.metadata)) return", "list() inputs_by_name[name].append(prev) inputs_by_name[name].append(d[\"value\"]) else: inputs_by_name[name]=d[\"value\"] args.config[\"input_files\"] = inputs_by_name logger.debug(\"Configuration file arguments and input_files", "taxon_id is inherited from the input file (i.e the source_id) pdbMeta1[\"taxon_id\"] = 0", "args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \" mkdir %s\" % tmp_dir print bashCommand process =", "required=False, action=\"store_true\", help=\"increase output verbosity\") parser.add_argument('--version', action='version', version='%(prog)s 0.1') args = parser.parse_args() if", "\" % pprint.pformat(args.metadata)) return 1 # # Executing pipeline # Calling MuG_Chromatin_equ_structure and", "# Set source_id & taxon_id pdbMeta[\"source_id\"] = [] if \"sequence\" in args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"])", "import argparse import json import time import socket # print localhost import logging", "pipeline # Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess def run_pipeline(args, num_cores,", "MuG metadata files\") parser.add_argument(\"--out_metadata\", required=False, type=Mugparams.writeable_file, metavar=\"RESULTS_JSON\", help=\"JSON file containing results metadata\") parser.add_argument(\"-v\",", "%s\" % out_dir) # Indexing config arguments by name arguments_by_name = dict((d[\"name\"], d[\"value\"])", "= \"{0}/{1}/tra_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"rm -r %s\" % tmp_dir print bashCommand", "file_path. Absolute path. Should be better relative to root_dir? tmp_dir = args.root_dir +", "= args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"] bashCommand = \"cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s\"", "psutil # available memory import subprocess import shutil import glob import tarfile import", "\"sequence\" in args.config['input_files']: trajMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id. taxon_id is inherited from the input", "json_data['output_files']= [] if (\"createStructure\" in args.config[\"arguments\"][\"operations\"]) or (\"create3DfromNucleaR\" in args.config[\"arguments\"][\"operations\"]): ### PDB_chromatin_structure pdbMeta", "given if not os.path.isdir(d): raise Exception(\"writeable_file:{0} not in a existing directory path, or", "{} # Set name. 
Should coincide with tool.json pdbMeta2[\"name\"] = \"PDB_dummy_chromatin_structure\" # Set", "= 0 if pdbMeta2[\"source_id\"]: for file_id in pdbMeta2[\"source_id\"]: pprint.pprint(args.metadata) if args.metadata[file_id][\"taxon_id\"]: pdbMeta2[\"taxon_id\"] =", "formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s') handler = logging.FileHandler('%s.log' %", "d = os.path.dirname(f) # TODO Fails if relative path given if not os.path.isdir(d):", "\"w:gz\") for fil in files: logger.info (\"Packing %s into statistics TAR\" % os.path.basename(fil))", "% pprint.pformat(args.config)) return 1 @staticmethod def process_metadata(args): global out_dir logger = logging.getLogger(\"lg\") #", "socket # print localhost import logging import re import pprint import multiprocessing #import", "in args.config[\"arguments\"][\"operations\"]: print \"do 3D from NucleaR\" gff_file_id = args.config[\"input_files\"][\"gffNucleaR\"] gff_file = args.root_dir", "# Set file_path # Absolute path. Should be better relative to root_dir? tmp_dir", "tmp_dir = \"{0}/{1}/str_{2}\".format(args.root_dir, args.config[\"arguments\"][\"project\"], x_rnd) bashCommand = \"rm -r %s\" % tmp_dir print", "# Indexing config input_files by name (name could not be unique - because", "the input file (i.e the source_id) pdbMeta1[\"taxon_id\"] = 0 if pdbMeta1[\"source_id\"]: for file_id", "\"chromatin_trajectory\" # Set file_path # Absolute path. Should be better relative to root_dir?", "accessible\".format(d)) else: if os.access(d, os.W_OK): return f else: raise Exception(\"writeable_file:{0} is not a", "tmp_dir + \"/chromdyn_str.pdb\" pdbMeta[\"file_path\"] = pdb_file # Set source_id & taxon_id pdbMeta[\"source_id\"] =", "relative path given if not os.path.isdir(d): raise Exception(\"writeable_file:{0} not in a existing directory", "output_file metadata into JSON data json_data['output_files'].append(trajMeta) # Prepare last output file: TAR of", "d) in enumerate(args.config[\"arguments\"])) args.config[\"arguments\"] = arguments_by_name # Indexing config input_files by name (name", "Append output_file metadata into JSON data json_data['output_files'].append(pdbMeta1) ### chromatin_dummy_trajectory_structure pdbMeta2 = {} #", "+ \"/chromdyn_dummy_str.pdb\" pdbMeta1[\"file_path\"] = pdb_file # Set source_id & taxon_id pdbMeta2[\"source_id\"] = []", "bashCommand = \"cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s\" % (nucl_pos, sequence,", "\"sequence\" in args.config['input_files']: pdbMeta[\"source_id\"].append(args.config['input_files'][\"sequence\"]) # Set taxon_id # taxon_id is inherited from the", "\"/chromdyn_dummy_str.pdb\" pdbMeta1[\"file_path\"] = pdb_file # Set source_id & taxon_id pdbMeta2[\"source_id\"] = [] if", "pdbMeta1[\"file_path\"] = pdb_file # Set source_id & taxon_id pdbMeta2[\"source_id\"] = [] if \"sequence\"", "shell=True) proc_stdout = process.communicate()[0].strip() print proc_stdout usr_dir = args.root_dir + \"/\" + args.config[\"arguments\"][\"project\"]", "Set name. Should coincide with tool.json pdbMeta2[\"name\"] = \"PDB_dummy_chromatin_structure\" # Set file_path. 
#
# Executing pipeline
# Calling MuG_Chromatin_equ_structure and MuG_Chromatin_sampling software in a subprocess
def run_pipeline(args, num_cores, x_rnd):
    logger = logging.getLogger("lg")

    def run_bash(bashCommand):
        # Echo and run a shell command, then echo its output
        # (this pattern is repeated verbatim throughout the original source)
        print(bashCommand)
        process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        proc_stdout = process.communicate()[0].strip()
        print(proc_stdout)

    usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]

    if "createStructure" in args.config["arguments"]["operations"]:
        print("do Structure")
        # Resolve input files from the VRE metadata
        sequence_file_id = args.config["input_files"]["sequence"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]   # input name assumed; truncated in source
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]

        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        run_bash(" mkdir %s" % tmp_dir)
        run_bash("cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s"
                 % (nucl_pos, sequence, tmp_dir))
        # Copy the equilibrated structure into the user directory
        run_bash("cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir))

    if "createTrajectory" in args.config["arguments"]["operations"]:
        print("do Trajectory")
        sequence_file_id = args.config["input_files"]["sequence"]
        nucl_pos_file_id = args.config["input_files"]["nuclPos"]   # input name assumed; truncated in source
        sequence = args.root_dir + "/" + args.metadata[sequence_file_id]["file_path"]
        nucl_pos = args.root_dir + "/" + args.metadata[nucl_pos_file_id]["file_path"]

        tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        iterations = args.config["arguments"]["createTrajectory:numStruct"]
        run_bash(" mkdir %s" % tmp_dir)
        run_bash("cd /home/MuG/MuG_Chromatin_sampling/src_test; bash run.sh %s %s %s %s"
                 % (nucl_pos, sequence, iterations, tmp_dir))
        # Copy starting structure, trajectory and dummy structure into the user directory
        run_bash("cp %s/output/chromdyn_start_str.pdb %s/output/chromdyn_str.dcd %s/output/chromdyn_dummy_str.pdb %s"
                 % (tmp_dir, tmp_dir, tmp_dir, usr_dir))

    if "create3DfromNucleaR" in args.config["arguments"]["operations"]:
        print("do 3D from NucleaR")
        gff_file_id = args.config["input_files"]["gffNucleaR"]
        gff_file = args.root_dir + "/" + args.metadata[gff_file_id]["file_path"]
        assembly = args.metadata[gff_file_id]["meta_data"]["assembly"]
        genome_file = "{0}/refGenomes/{1}/{1}.fa".format(args.public_dir, assembly)

        tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
        run_bash(" mkdir %s" % tmp_dir)
        # Convert nucleR calls into nucleosome positions and a sequence for the 3D builder
        # (the remaining nucleR2structure.py options are truncated in the source)
        run_bash(" /home/MuG/MuG_Chromatin_equ_structure/src_test/nucleR2structure.py --calls %s --genome_file %s"
                 % (gff_file, genome_file))
        run_bash("cd /home/MuG/MuG_Chromatin_equ_structure/src_test; bash run.sh %s %s %s"
                 % (tmp_dir + "/nucleR_to_3D_nucl_pos.txt", tmp_dir + "/nucleR_to_3D_seq.txt", tmp_dir))
        # Copy the resulting structure into the user directory
        run_bash("cp %s/output/chromdyn_str.pdb %s" % (tmp_dir, usr_dir))

    return 1
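# Note: run_bash above interpolates paths into shell strings (shell=True), which
# silently breaks on paths containing spaces and is shell-injection prone. A
# minimal alternative sketch using an argument list and subprocess.run
# (Python 3.5+); behaviour is assumed equivalent but has not been tested against
# the MuG runtime:
def _run_checked(cmd_list, cwd=None):
    import subprocess
    # No shell involved: each element is passed verbatim, so no quoting is needed
    result = subprocess.run(cmd_list, cwd=cwd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, check=True)
    print(result.stdout.decode().strip())

# e.g.:
# _run_checked(["bash", "run.sh", nucl_pos, sequence, tmp_dir],
#              cwd="/home/MuG/MuG_Chromatin_equ_structure/src_test")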
#
# Prepare metadata for the output files
def prepare_results(args, x_rnd):
    global out_dir
    logger = logging.getLogger("lg")

    if (args.out_metadata):
        # Create out_metadata JSON
        json_data = {}
        json_data['output_files'] = []
        ops = args.config["arguments"]["operations"]
        usr_dir = args.root_dir + "/" + args.config["arguments"]["project"]

        def output_meta(name, file_path):
            # Build one output_files record; the same fields are repeated in the
            # source for every output, so they are factored into this helper
            meta = {}
            # Set name. Should coincide with tool.json
            meta["name"] = name
            # Set file_path. Absolute path. Should be better relative to root_dir?
            meta["file_path"] = file_path
            # Set source_id & taxon_id
            meta["source_id"] = []
            if "sequence" in args.config['input_files']:
                meta["source_id"].append(args.config['input_files']["sequence"])
            # taxon_id is inherited from the input file (i.e. the source_id)
            meta["taxon_id"] = 0
            if meta["source_id"]:
                for file_id in meta["source_id"]:
                    if args.metadata[file_id]["taxon_id"]:
                        meta["taxon_id"] = args.metadata[file_id]["taxon_id"]
                        break
            return meta

        if ("createStructure" in ops) or ("create3DfromNucleaR" in ops):
            ### PDB_chromatin_structure
            json_data['output_files'].append(
                output_meta("PDB_chromatin_structure", usr_dir + "/chromdyn_str.pdb"))

        if "createTrajectory" in ops:
            ### PDB_chromatin_starting_structure
            json_data['output_files'].append(
                output_meta("PDB_chromatin_starting_structure", usr_dir + "/chromdyn_start_str.pdb"))
            ### PDB_dummy_chromatin_structure
            json_data['output_files'].append(
                output_meta("PDB_dummy_chromatin_structure", usr_dir + "/chromdyn_dummy_str.pdb"))
            ### chromatin_trajectory
            json_data['output_files'].append(
                output_meta("chromatin_trajectory", usr_dir + "/chromdyn_str.dcd"))

        # Prepare last output file: TAR of outputs, *CSVs and *PNGs
        files = []
        extensions = ('*.txt', '*.csv', '*.png')
        out_dirs = []
        if "createTrajectory" in ops:
            out_dirs.append("{0}/{1}/tra_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        if "create3DfromNucleaR" in ops:
            out_dirs.append("{0}/{1}/str_{2}/output".format(args.root_dir, args.config["arguments"]["project"], x_rnd))
        for out_dir in out_dirs:
            for extension in extensions:
                files.extend(glob.glob(out_dir + "/" + extension))

        tmp_dir = args.root_dir + "/" + args.config["arguments"]["project"]
        out_tar = tmp_dir + "/results.tar.gz"
        tar = tarfile.open(out_tar, "w:gz")
        for fil in files:
            logger.info("Packing %s into statistics TAR" % os.path.basename(fil))
            tar.add(fil, arcname=os.path.basename(fil))
        tar.close()

        # Set metadata for TAR output file
        result = {}
        result["name"] = "summary"
        result["source_id"] = []
        result["file_path"] = out_tar
        result["taxon_id"] = 0
        json_data['output_files'].append(result)

        # Write the results metadata JSON
        with open(args.out_metadata, 'w') as out_file:
            json.dump(json_data, out_file)

    # Clean up the temporary run directories
    tmp_dir = "{0}/{1}/str_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
    bashCommand = "rm -r %s" % tmp_dir
    print(bashCommand)
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print(proc_stdout)

    tmp_dir = "{0}/{1}/tra_{2}".format(args.root_dir, args.config["arguments"]["project"], x_rnd)
    bashCommand = "rm -r %s" % tmp_dir
    print(bashCommand)
    process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print(proc_stdout)
    return 1
def main():
    # Start logging
    logger = logging.getLogger("lg")
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s')
    handler = logging.FileHandler('%s.log' % "chromatindyn_wf")   # log file name truncated in source
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.INFO)
    streamhandler.setFormatter(formatter)
    logger.addHandler(streamhandler)
    logger.info('Starting %s' % __file__)

    # Parse CMD
    parser = argparse.ArgumentParser(prog="chromatindyn_wf", description="Chromatin Dynamics workflow")
    parser.add_argument("--config", required=True, type=Mugparams.check_json,
                        metavar="CONFIG_JSON", help="JSON file containing workflow parameters")
    parser.add_argument("--root_dir", required=True, type=Mugparams.readable_dir,
                        help="Absolute path of the user data directory.")
    parser.add_argument("--public_dir", required=False, type=Mugparams.readable_dir,
                        metavar="PUBLIC_PATH", help="Absolute path of the MuG public directory")
    parser.add_argument("--metadata", required=True, type=Mugparams.check_json,
                        help="JSON file containing MuG metadata files")
    parser.add_argument("--out_metadata", required=False, type=Mugparams.writeable_file,
                        metavar="RESULTS_JSON", help="JSON file containing results metadata")
    parser.add_argument("-v", "--verbose", required=False, action="store_true",
                        help="increase output verbosity")
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    args = parser.parse_args()
    if args.verbose:
        streamhandler.setLevel(logging.DEBUG)

    # Index configuration and metadata for easy access
    Mugparams.process_arguments(args)
    Mugparams.process_metadata(args)

    # Resources available on this host
    num_cores = multiprocessing.cpu_count()
    host = socket.gethostname()
    #mem = psutil.virtual_memory()
    logger.debug('HOST=%s CPUs=%s MEM=x' % (host, num_cores))

    # Run pipeline
    x_rnd = int(random.random() * 10000000)
    outfiles = run_pipeline(args, num_cores, x_rnd)

    # Results
    prepare_results(args, x_rnd)


if __name__ == '__main__':
    main()
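# Typical invocation from the VRE (all paths below are illustrative only):
#
#   python chromatindyn_wf.py \
#       --config       /data/run001/config.json \
#       --root_dir     /data \
#       --public_dir   /public \
#       --metadata     /data/run001/in_metadata.json \
#       --out_metadata /data/run001/out_metadata.json \
#       --verbose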
[ "import matplotlib.pyplot as plt from matplotlib.patches import FancyBboxPatch from matplotlib.colors import LinearSegmentedColormap from", "!= None: vmin = np.log10(vmin) if vmax != None: vmax = np.log10(vmax) #", "a basemap Arguments: info (info): ``info`` object containing metadata spatial (list): List with", "if sidebar: text1, text2, text3, text4 = make_legend_text(info,d.attrs) ax2 = plt.subplot2grid((1,24),(0,0),colspan=4) # Turn", "'#cccccc', fillcolor2 = '#a6a6a6', yoffset = (0.01*(m.ymax-m.ymin)), labelstyle='simple',zorder=60) if not sidebar: cbaxes2 =", "minlon = spatial[2] maxlon = spatial[3] # Create map m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,", "# m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25) # m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x, color): for m in", "{'red': ((0.0, 1.0, 1.0), # (1.0, 0.5, 0.5)), # 'green': ((0.0, 1.0, 1.0),", "0.2, 0.03],zorder=60) cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') # Change colorbar labels", "cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') # Change colorbar labels for easier", "file_in, sidebar=False, save=True): ''' Creates a map of \"pings\" rather than gridded density", "if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or", "+ '---------------------------------------------------------------\\n' ) return text1, text2, text3, text4 def map_dots(info, file_in, sidebar=False, save=True):", "description: ' + md['unit_description'] + '\\n\\n' + 'Data source: ' + md['data_source'] +", "merged file to map. If ``None`` it looks for ``merged_grid.nc`` in the `\\merged`", "str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out", "print('Min: ' + str(np.min(Hmasked))) print('Max: ' + str(np.max(Hmasked))) print('Mean: ' + str(np.nanmean(Hmasked))) print('Std:", "xr.open_dataset(file_in) # Define boundaries if info.grid.minlat == None or info.grid.maxlat == None or", "= os.path.join(path_to_map, info.grid.basemap + '.grid') pickle.dump(info, open(info_picklename, 'wb'), -1) print('!!! 
Pickles were just", "maxlat = info.grid.maxlat minlon = info.grid.minlon maxlon = info.grid.maxlon path_to_basemap = info.dirs.project_path /", "Management Division\\n' + 'Ecosystem Management Branch\\n' + 'Fisheries and Oceans Canada – Maritimes", "- (4*Hmasked.std()) alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed", "# ax.spines['top'].set_color('#00a3cc') ax.spines['right'].set_color('#00a3cc') ax.spines['bottom'].set_color('#00a3cc') ax.spines['left'].set_color('#00a3cc') for k, spine in ax.spines.items(): #ax.spines is a", "'basemap_spots.p') m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # if not os.path.exists(str(path_to_basemap / 'basemap.p')): # m =", "m/min ...roughly 20 knots max_speed = 316.66 # m/min ...roughly 20 knots vmin", "- d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed = 616.66 # m/min ...roughly", "vessels: ' + info.sidebar.included_vessel_types + '\\n\\n' + 'Grid size: ' + str(md['bin_size']) +", "(or merged) file Arguments: info (info): ``info`` object containing metadata Keyword Arguments: file_in", "text3, text4 def map_dots(info, file_in, sidebar=False, save=True): ''' Creates a map of \"pings\"", "boundaries if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None", "0.9), (1.0, 1.0,1.0)), 'blue': ((0.0, 0.9, 0.9), (1.0, 1.0, 1.0))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)", "m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x, color): for m in x: for t in", "# ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat = spatial[0] maxlat = spatial[1]", "plt.savefig(os.path.join(filedir,filename), dpi=300) # Close netCDF file d.close() if to_screen == False: plt.close() return", "produced basemap. If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool): If", "0.145, text3, horizontalalignment='left', verticalalignment='top', size=7, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25, text4, style='italic', horizontalalignment='left',", "llcrnrlat=minlat,urcrnrlat=maxlat, llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution) # TOPO # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using the", "path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary')) else: path_to_map = path_to_basemap return path_to_map def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False): '''", "1.0), (1.0, 0.5, 0.5)), 'green': ((0.0, 0.85, 0.85), (1.0, 0.0, 0.0)), 'blue': ((0.0,", "source: ' + md['data_source'] + '\\n\\n' + 'Data source description:\\n' + md['data_description'] +", "= nc.variables lon = ncv['longitude'][:] lat = ncv['latitude'][:] lons, lats = np.meshgrid(lon,lat) topo", "1.0,1.0)), 'blue': ((0.0, 0.9, 0.9), (1.0, 1.0, 1.0))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name", "object containing metadata Keyword Arguments: path_to_basemap (str): If ``'auto'`` it looks in ``grids``", "xr import cmocean from pathlib import Path import _pickle as pickle import os", "the produced basemap. 
If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool):", "md['enddate'][0:-3] + '\\n\\n' + 'Included speeds: ' + info.sidebar.included_speeds + '\\n' + 'Included", "ax.axvline(linewidth=4, color=\"#00a3cc\") # ax.spines['top'].set_color('#00a3cc') ax.spines['right'].set_color('#00a3cc') ax.spines['bottom'].set_color('#00a3cc') ax.spines['left'].set_color('#00a3cc') for k, spine in ax.spines.items(): #ax.spines", "See also: :mod:`pickle` ''' # # basemap = [grid, m] # f =", "in ax.spines.items(): #ax.spines is a dictionary spine.set_zorder(35) # ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False)", "ncv['longitude'][:] lat = ncv['latitude'][:] lons, lats = np.meshgrid(lon,lat) topo = ncv['topo'][:,:] # fig", "png if save: if filedir_out == 'auto': filedir = str(info.dirs.pngs) else: filedir =", "fig.add_axes([0.05,0.05,0.94,0.94]) TOPOmasked = np.ma.masked_where(topo>0,topo) cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5) # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25) # m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23)", "('Unit description: ' + md['unit_description'] + '\\n\\n' + 'Data source: ' + md['data_source']", "[left,right,top,bottom] par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc',", "and Coastal Management Division\\n' + 'Ecosystem Management Branch\\n' + 'Fisheries and Oceans Canada", "product. ​Data is provided\\n' + 'on an \"AS IS\" basis. ​USE AT YOUR", "/ (info.grid.basemap + '.p')) # pickle.dump(basemap, open(picklename, 'wb'), -1) # print('!!! Pickle just", "ax2 = plt.subplot2grid((1,24),(0,0),colspan=4) # Turn off tick labels ax2.get_xaxis().set_visible(False) ax2.get_yaxis().set_visible(False) ax2.add_patch(FancyBboxPatch((0,0), width=1, height=1,", "interpreting label_values = cbar._tick_data_values log_label_values = np.round(10 ** label_values,decimals=0) labels = [] for", "0.0, 0.0))} # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0, 1.0, 1.0), (1.0,", "+ basemap_file) # Check for basemap.p and, if doesn;t exist, make it if", "basemap. 
If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool): If ``True``", "basemap_file = info.dirs.basemap print('Basemap file: ' + basemap_file) # Check for basemap.p and,", "to a pickle file Arguments: m (mpl_toolkits.basemap.Basemap): Basemap object info (info): ``info`` object", "warnings np.warnings.filterwarnings('ignore') import xarray as xr import cmocean from pathlib import Path import", "ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show() if sidebar: basemap_name =", "os.path.exists(str(path_to_basemap / 'basemap.p')): # m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # else: # print('Found basemap...') #", "cmapcolor =plt.get_cmap(cmap) cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax) #scalebar sblon = minlon", "# if not os.path.exists(str(path_to_basemap / 'basemap.p')): # m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # else: #", "not os.path.exists(basemap_file): m = sm.make_basemap(info,[minlat,maxlat,minlon,maxlon]) else: print('Found basemap...') m = pickle.load(open(basemap_file,'rb')) x, y", "os import ship_mapper as sm import urllib.request import netCDF4 def map_density(info, file_in=None, cmap='Default',", "''' print('map_density ------------------------------------------------------') # Load data if file_in == None: file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc')", "file d.close() if to_screen == False: plt.close() return def make_legend_text(info,md): ''' Makes text", "x, y = m(singleship['longitude'].values,singleship['latitude'].values) # x, y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,2,marker='o',color='r', zorder=30)", "= {'red': ((0.0, 1.0, 1.0), # (1.0, 0.5, 0.5)), # 'green': ((0.0, 1.0,", "object ''' print('map_density ------------------------------------------------------') # Load data if file_in == None: file_in =", "basemap...') m = pickle.load(open(basemap_file,'rb')) # Create grid for mapping lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values)", ") text4 = ('---------------------------------------------------------------\\n' + 'WARNING: This is a preliminary data product.\\n' +", "Basemap object info (info): ``info`` object containing metadata Keyword Arguments: path_to_basemap (str): If", "(and correspoding info.grid) to a pickle file Arguments: m (mpl_toolkits.basemap.Basemap): Basemap object info", "boxstyle=\"square,pad=0\", zorder=3, facecolor='#e6e6e6', alpha=1.0, edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold', size=10,", "plot save (bool): If ``True`` a ``.png`` figure is saved to hardrive '''", "= filename_out sm.checkDir(filedir) plt.savefig(os.path.join(filedir,filename), dpi=300) # Close netCDF file d.close() if to_screen ==", "in the `\\merged` directory sidebar (bool): If ``True``, includes side panel with metadata", "http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using the netCDF output option # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')", "cbaxes2 = fig.add_axes([0.019, 0.9, 0.15, 0.02],zorder=60) cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')", "plt.figure() # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.') # 
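# Note on the colorbar relabeling above: ``cbar._tick_data_values`` is a private
# Matplotlib attribute that is not available in current releases. A version-safe
# sketch of the same "log10 back to raw counts" relabeling using only public API
# (it assumes, as above, that the mappable holds log10-transformed densities):
from matplotlib.ticker import FuncFormatter

def _log10_to_counts(tick_value, _pos):
    # Ticks live in log10 space; label them with the corresponding raw count
    return str(int(round(10 ** tick_value)))

# cbar = plt.colorbar(extend='both', cax=cbaxes2, orientation='horizontal',
#                     format=FuncFormatter(_log10_to_counts))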
def make_legend_text(info, md):
    '''
    Makes text for legend in left block of map

    :param info: ``info`` object containing metadata
    :param md: dict of attributes of the mapped netCDF file
    :return: text for legend
    '''
    import datetime
    alat = (md['maxlat'] - md['minlat']) / 2

    text1 = 'VESSEL DENSITY HEATMAP'

    # --------------------------------------------------------
    text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
             'Data source: ' + md['data_source'] + '\n\n' +
             'Data source description:\n' + md['data_description'] + '\n\n' +
             'Time range: \n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\n\n' +
             'Included speeds: ' + info.sidebar.included_speeds + '\n' +
             'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
             'Grid size: ' + str(md['bin_size']) + ' degrees (~' +
             str(int(round(sm.degrees_to_meters(md['bin_size'], alat)))) + ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
             'Interpolation: ' + md['interpolation'] + '\n' +
             'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
             'Time bin: ' + str(round(md['time_bin'] * 1440, 1)) + ' minutes\n' +
             'Mask below: ' + str(md['mask_below']) + ' vessels per grid')

    text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
             'Creation script: ' + os.path.basename(__file__) + '\n' +  # exact field truncated in source
             '\n' +
             'Created by:\n' +
             'Oceans and Coastal Management Division\n' +
             'Ecosystem Management Branch\n' +
             'Fisheries and Oceans Canada – Maritimes Region\n' +
             'Bedford Institute of Oceanography\n' +
             'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2')

    text4 = ('---------------------------------------------------------------\n' +
             'WARNING: This is a preliminary data product.\n' +
             'We cannot guarantee the validity, accuracy, \n' +
             'or quality of this product. Data is provided\n' +
             'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
             '---------------------------------------------------------------\n')

    return text1, text2, text3, text4
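# ``sm.degrees_to_meters`` is used above but defined elsewhere in the package; a
# plausible stand-in using the standard approximation (one degree of longitude is
# about 111,320 m at the equator, shrinking with the cosine of latitude):
import math

def degrees_to_meters_sketch(deg, lat):
    # Approximate ground span of `deg` degrees of longitude at latitude `lat`
    return deg * 111320.0 * math.cos(math.radians(lat))

# e.g. a 0.05-degree bin at 45 N spans roughly 0.05 * 111320 * cos(45) ~ 3936 m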
def map_dots(info, file_in, sidebar=False, save=True):
    '''
    Creates a map of "pings" rather than gridded density

    Arguments:
        info (info): ``info`` object containing metadata
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the `\merged` directory
        sidebar (bool): If ``True``, includes side panel with metadata
        save (bool): If ``True`` a ``.png`` figure is saved to hardrive
    '''
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if info.grid.minlat == None or info.grid.maxlat == None or \
       info.grid.minlon == None or info.grid.maxlon == None:
        minlat = d['latitude'].values.min()   # bounds from the ping data
        maxlat = d['latitude'].values.max()
        minlon = d['longitude'].values.min()
        maxlon = d['longitude'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')
    if sidebar:
        basemap_file = str(path_to_basemap / 'basemap_sidebar.p')
    else:
        basemap_file = str(path_to_basemap / 'basemap.p')

    if not os.path.exists(basemap_file):
        m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])
    else:
        print('Found basemap...')
        m = pickle.load(open(basemap_file, 'rb'))

    x, y = m(d['longitude'].values, d['latitude'].values)
    cs = m.scatter(x, y, s=0.1, marker='o', color='r', zorder=10)
    # plt.show()
    #
    # # Save map as png
    # if save:
    #     filedir = str(info.dirs.pngs)
    #     sm.checkDir(filedir)
    #     filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
    #     plt.savefig(os.path.join(filedir,filename), dpi=300)
    return


def map_dots_one_ship(info, file_in, Ship_No, save=True):
    '''
    Creates a map of "pings" (i.e. not gridded density) of only one ship

    Arguments:
        info (info): ``info`` object containing metadata
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the `\merged` directory
        Ship_No (str): Unique identifier of the ship to plot
        save (bool): If ``True`` a ``.png`` figure is saved to hardrive
    '''
    import pandas as pd
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries (same logic as map_dots)
    if info.grid.minlat == None or info.grid.maxlat == None or \
       info.grid.minlon == None or info.grid.maxlon == None:
        minlat = d['latitude'].values.min()
        maxlat = d['latitude'].values.max()
        minlon = d['longitude'].values.min()
        maxlon = d['longitude'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')
    # basemap_file = str(path_to_basemap / 'basemap_spots.p')
    m = sm.make_basemap(info.dirs.project_path, [minlat, maxlat, minlon, maxlon])
    # if not os.path.exists(str(path_to_basemap / 'basemap.p')):
    #     m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
    # else:
    #     print('Found basemap...')
    #     m = pickle.load(open(basemap_file,'rb'))

    # Keep only pings inside the bounding box, then select one vessel
    indx = ((d['longitude'] > minlon) & (d['longitude'] <= maxlon) &
            (d['latitude'] > minlat) & (d['latitude'] <= maxlat))
    filtered_data = d.sel(Dindex=indx)
    ship_id = info.ship_id
    unis = pd.unique(filtered_data[ship_id].values)
    ship = unis[Ship_No]
    indxship = (filtered_data[ship_id] == ship)
    singleship = filtered_data.sel(Dindex=indxship)
    print('Ship id:' + str(ship))
    # print(singleship['longitude'].values)
    # print(singleship['latitude'].values)

    x, y = m(singleship['longitude'].values, singleship['latitude'].values)
    # x, y = m(d['longitude'].values,d['latitude'].values)
    cs = m.scatter(x, y, 2, marker='o', color='r', zorder=30)

    # fig = plt.figure()
    # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')
    # plt.show()
    #
    # # Save map as png
    # if save:
    #     filedir = str(info.dirs.pngs)
    #     sm.checkDir(filedir)
    #     filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
    #     plt.savefig(os.path.join(filedir,filename), dpi=300)
    return
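# Usage sketch for the two ping-plotting helpers (argument values are
# hypothetical, and ``info`` stands for any populated ``info`` object):
#
#   import ship_mapper as sm
#   sm.map_dots(info, '/data/myproject/merged/merged.nc')
#   # plot only the third unique vessel found inside the bounding box:
#   sm.map_dots_one_ship(info, '/data/myproject/merged/merged.nc', Ship_No=2)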
def define_path_to_map(info, path_to_basemap='auto'):
    '''
    Figures out where the .basemap and .grid files are

    Arguments:
        info (info): ``info`` object containing metadata
    '''
    if path_to_basemap == 'auto':
        if info.grid.type == 'one-off':
            path_to_map = os.path.join(info.dirs.project_path, info.grid.region, 'ancillary')
        elif info.grid.type == 'generic':
            path_to_map = os.path.abspath(os.path.join(info.dirs.project_path, 'ancillary'))
    else:
        path_to_map = path_to_basemap
    return path_to_map


def make_basemap(info, spatial, path_to_basemap='auto', sidebar=False):
    '''
    Makes a basemap

    Arguments:
        info (info): ``info`` object containing metadata
        spatial (list): List with corners [minlat, maxlat, minlon, maxlon];
            will be deprecated soon

    Keyword arguments:
        path_to_basemap (str): Directory where to save the produced basemap.
            If ``'auto'`` then path is setup by
            :func:`~ship_mapper.mapper.define_path_to_map`
        sidebar (bool): If ``True`` space for a side panel is added to the basemap

    Returns:
        A ``.basemap`` and a ``.grid`` files
    '''
    print('Making basemap...')
    # -------------------------------------------------------------------------
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
    sm.checkDir(str(path_to_map))

    minlat = spatial[0]
    maxlat = spatial[1]
    minlon = spatial[2]
    maxlon = spatial[3]

    # Create map
    m = Basemap(projection='mill', llcrnrlat=minlat, urcrnrlat=maxlat,
                llcrnrlon=minlon, urcrnrlon=maxlon, resolution=info.maps.resolution)

    # TOPO
    # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
    # using the netCDF output option
    # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
    bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
    if not os.path.isfile(bathymetry_file):
        isub = 1
        base_url = 'http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
        query = 'topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat, isub, minlat,
                                                      minlon, isub, maxlon)
        url = base_url + query
        # store data in NetCDF file
        urllib.request.urlretrieve(url, bathymetry_file)

    # open NetCDF data in
    nc = netCDF4.Dataset(bathymetry_file)
    ncv = nc.variables
    lon = ncv['longitude'][:]
    lat = ncv['latitude'][:]
    lons, lats = np.meshgrid(lon, lat)
    topo = ncv['topo'][:, :]

    # Figure and axes
    fig = plt.figure()   # original figsize settings are truncated in the source
    # fig = plt.figure(figsize=(19,9))
    # ax = fig.add_axes([0.05,0.05,0.80,1])
    # ax = fig.add_axes([0,0,0.80,1])
    # ax = fig.add_axes([0.23,0.035,0.85,0.9])
    if sidebar:
        ax = plt.subplot2grid((1, 24), (0, 5), colspan=19)
    else:
        ax = fig.add_axes([0.05, 0.05, 0.94, 0.94])

    TOPOmasked = np.ma.masked_where(topo > 0, topo)
    cs = m.pcolormesh(lons, lats, TOPOmasked, cmap=load_my_cmap('my_cmap_lightblue'),
                      latlon=True, zorder=5)

    # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
    # m.fillcontinents(color='#E1E1A0',zorder=23)
    m.drawcoastlines(color='#a6a6a6', linewidth=0.5, zorder=25)
    m.fillcontinents(color='#e6e6e6', zorder=23)
    m.drawmapboundary()

    def setcolor(x, color):
        # Recolor the text labels returned by drawparallels/drawmeridians
        for key in x:
            for t in x[key][1]:
                t.set_color(color)

    parallels = np.arange(minlat, maxlat, info.maps.parallels)  # spacing attribute assumed, mirroring meridians
    # labels = [left,right,top,bottom]
    par = m.drawparallels(parallels, labels=[True, False, False, False],
                          dashes=[20, 20], color='#00a3cc', linewidth=0.2, zorder=25)
    setcolor(par, '#00a3cc')
    meridians = np.arange(minlon, maxlon, info.maps.meridians)
    mers = m.drawmeridians(meridians, labels=[False, False, False, True],
                           dashes=[20, 20], color='#00a3cc', linewidth=0.2, zorder=25)
    setcolor(mers, '#00a3cc')

    ax = plt.gca()
    # ax.axhline(linewidth=4, color="#00a3cc")
    # ax.axvline(linewidth=4, color="#00a3cc")
    # ax.spines['top'].set_color('#00a3cc')
    ax.spines['right'].set_color('#00a3cc')
    ax.spines['bottom'].set_color('#00a3cc')
    ax.spines['left'].set_color('#00a3cc')
    for k, spine in ax.spines.items():   # ax.spines is a dictionary
        spine.set_zorder(35)
    # ax.spines['top'].set_visible(False)
    # ax.spines['right'].set_visible(False)
    # ax.spines['bottom'].set_visible(False)
    # ax.spines['left'].set_visible(False)

    # fig.tight_layout(pad=0.25)
    fig.tight_layout(rect=[0.01, 0.01, .99, .99])
    plt.show()

    if sidebar:
        basemap_name = 'basemap_sidebar.p'
    else:
        basemap_name = 'basemap.p'

    info = sm.calculate_gridcell_areas(info)

    # Save basemap
    save_basemap(m, info, path_to_basemap=path_to_map)
    # picklename = str(path_to_map / basemap_name)
    # pickle.dump(m,open(picklename,'wb'),-1)
    # print('!!! Pickle just made: ' + picklename)
    return m
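# The ERDDAP request above is plain string formatting. For a 42-46 N, 68-60 W box
# with no subsampling (isub=1) the query would be (bounds are only an example):
#
#   http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?topo[(46.000000):1:(42.000000)][(-68.000000):1:(-60.000000)]
#
# Latitude runs from maxlat down to minlat, presumably because this dataset's
# latitude axis is stored north-to-south on the server.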
def load_my_cmap(name):
    '''
    Creates and loads custom colormap
    '''
    # cdict = {'red':   ((0.0, 0.0, 0.0),
    #                    (1.0, 0.7, 0.7)),
    #          'green': ((0.0, 0.25, 0.25),
    #                    (1.0, 0.85, 0.85)),
    #          'blue':  ((0.0, 0.5, 0.5),
    #                    (1.0, 1.0, 1.0))}
    # my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
    if name == 'my_cmap_lightblue':
        cdict = {'red':   ((0.0, 0.0, 0.0),    # Dark
                           (1.0, 0.9, 0.9)),   # Light
                 'green': ((0.0, 0.9, 0.9),
                           (1.0, 1.0, 1.0)),
                 'blue':  ((0.0, 0.9, 0.9),
                           (1.0, 1.0, 1.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_amber2red':
        # cdict = {'red':   ((0.0, 1.0, 1.0),
        #                    (1.0, 0.5, 0.5)),
        #          'green': ((0.0, 1.0, 1.0), ...
        cdict = {'red':   ((0.0, 1.0, 1.0),
                           (1.0, 0.5, 0.5)),
                 'green': ((0.0, 0.85, 0.85),
                           (1.0, 0.0, 0.0)),
                 'blue':  ((0.0, 0.3, 0.3),
                           (1.0, 0.0, 0.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_red2black':
        # c1 = np.array([252,142,110])/256 #RGB/256
        c1 = np.array([250, 59, 59]) / 256    #RGB/256
        c2 = np.array([103, 0, 13]) / 256     #RGB/256
        cdict = {'red':   ((0.0, c1[0], c1[0]),
                           (1.0, c2[0], c2[0])),
                 'green': ((0.0, c1[1], c1[1]),
                           (1.0, c2[1], c2[1])),
                 'blue':  ((0.0, c1[2], c1[2]),
                           (1.0, c2[2], c2[2]))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    else:
        print('cmap name does not match any of the available cmaps')
        my_cmap = None   # avoid a NameError on the return below
    return my_cmap


def save_basemap(m, info, path_to_basemap='auto'):
    '''
    Saves basemap (and corresponding info.grid) to a pickle file

    Arguments:
        m (mpl_toolkits.basemap.Basemap): Basemap object
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory

    Returns:
        Pickle file

    See also:
        :mod:`pickle`
    '''
    # # basemap = [grid, m]
    # f = open(str(path_to_map / (info.grid.basemap + '.p')),'w')
    # pickle.dump(grid, f)
    # pickle.dump(m, f)
    # f.close()
    # picklename = str(path_to_map / (info.grid.basemap + '.p'))
    # pickle.dump(basemap, open(picklename, 'wb'), -1)
    # print('!!! Pickle just made: ' + picklename)
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
    # basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
    basemap_picklename = os.path.join(path_to_map, info.grid.basemap + '.basemap')
    pickle.dump(m, open(basemap_picklename, 'wb'), -1)
    # info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
    info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
    pickle.dump(info, open(info_picklename, 'wb'), -1)
    print('!!! Pickles were just made: ' + basemap_picklename)
    return
Pickles were just made: ' + basemap_picklename) return", "0.5)), 'green': ((0.0, 0.85, 0.85), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.3, 0.3), (1.0,", "pickle.dump(m, open(basemap_picklename, 'wb'), -1) # info_picklename = str(path_to_map / (info.grid.basemap + '.grid')) info_picklename", "pickle import os import ship_mapper as sm import urllib.request import netCDF4 def map_density(info,", "= info.maps.cbarmax else: vmax = None if info.maps.cbarmin == 'auto': # vmin =", "text2 = ('Unit description: ' + md['unit_description'] + '\\n\\n' + 'Data source: '", "sidebar=False): ''' Makes a basemap Arguments: info (info): ``info`` object containing metadata spatial", "if filename_out == 'auto': filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png'", "md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\\n\\n' + 'Included speeds: '", "# fig = plt.figure() # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.') # plt.show() # # Save map as", "-1) # info_picklename = str(path_to_map / (info.grid.basemap + '.grid')) info_picklename = os.path.join(path_to_map, info.grid.basemap", "using the netCDF output option # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc') bathymetry_file =", "xx,yy = m(lons_grid, lats_grid) H = d['ship_density'].values # Rotate and flip H... ----------------------------------------------------------------------------", "save: # filedir = str(info.dirs.pngs) # sm.checkDir(filedir) # filename = info.project_name + '_'", "fig.add_axes([0.019, 0.9, 0.15, 0.02],zorder=60) cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080')", "1.0, 1.0), # (1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0, 0.0), # (1.0,", "(1.0, 0.5, 0.5)), 'green': ((0.0, 0.85, 0.85), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.3,", "= [left,right,top,bottom] par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers =", "m(lons_grid, lats_grid) H = d['ship_density'].values # Rotate and flip H... 
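# Example usage for map_density (a sketch, not part of the library API).
# ``sm.read_info`` below is a hypothetical loader used only for illustration;
# build the ``info`` object however your project normally does.
#
#   import ship_mapper as sm
#   info = sm.read_info('my_project')              # hypothetical helper
#   sm.map_density(info, file_in=None,             # None -> merged_grid.nc
#                  cmap='Default', sidebar=True,
#                  to_screen=False, save=True)     # png goes to info.dirs.pngs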
def make_legend_text(info, md):
    '''
    Makes text for legend in left block of map

    :param info info: ``info`` object containing metadata
    :return: text for legend
    '''
    import datetime
    alat = (md['maxlat'] - md['minlat']) / 2

    text1 = 'VESSEL DENSITY HEATMAP'
    # print(info)
    # --------------------------------------------------------
    text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
             'Data source: ' + md['data_source'] + '\n\n' +
             'Data source description:\n' + md['data_description'] + '\n\n' +
             'Time range: \n' + md['startdate'][0:-3] + ' to ' +
             md['enddate'][0:-3] + '\n\n' +
             'Included speeds: ' + info.sidebar.included_speeds + '\n' +
             'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
             'Grid size: ' + str(md['bin_size']) + ' degrees (~' +
             str(int(round(sm.degrees_to_meters(md['bin_size'], alat)))) + ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
             'Interpolation: ' + md['interpolation'] + '\n' +
             'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
             'Time bin: ' + str(round(md['time_bin'] * 1440, 1)) + ' minutes\n' +
             'Mask below: ' + str(md['mask_below']) + ' vessels per grid')

    text3 = ('Creation date: ' +
             datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
             'Creation script: ' + info.run_name + '.py\n' +
             'Software: ship mapper v0.1\n\n' +
             'Created by:\n' +
             'Oceans and Coastal Management Division\n' +
             'Ecosystem Management Branch\n' +
             'Fisheries and Oceans Canada – Maritimes Region\n' +
             'Bedford Institute of Oceanography\n' +
             'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2')

    text4 = ('---------------------------------------------------------------\n' +
             'WARNING: This is a preliminary data product.\n' +
             'We cannot guarantee the validity, accuracy, \n' +
             'or quality of this product. Data is provided\n' +
             'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
             '---------------------------------------------------------------\n')

    return text1, text2, text3, text4
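# Sketch of how the legend text is consumed: ``map_density`` calls this with
# the merged grid's attribute dict when ``sidebar=True`` and draws the four
# returned strings on the side panel. The file path below is hypothetical.
#
#   d = xr.open_dataset('merged_grid.nc')            # hypothetical file
#   t1, t2, t3, t4 = make_legend_text(info, d.attrs)
#   print(t1)                                        # 'VESSEL DENSITY HEATMAP'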
def map_dots(info, file_in, sidebar=False, save=True):
    '''
    Creates a map of "pings" rather than gridded density

    Arguments:
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the ``\merged`` directory
        sidebar (bool): If ``True``, includes side panel with metadata
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
    '''
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if (info.grid.minlat == None or info.grid.maxlat == None or
            info.grid.minlon == None or info.grid.maxlon == None):
        minlat = d['lat'].values.min()
        maxlat = d['lat'].values.max()
        minlon = d['lon'].values.min()
        maxlon = d['lon'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')

    if sidebar:
        basemap_file = str(path_to_basemap / 'basemap_sidebar.p')
    else:
        basemap_file = str(path_to_basemap / 'basemap.p')

    if not os.path.exists(basemap_file):
        m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])
    else:
        print('Found basemap...')
        m = pickle.load(open(basemap_file, 'rb'))

    x, y = m(d['longitude'].values, d['latitude'].values)
    cs = m.scatter(x, y, s=0.1, marker='o', color='r', zorder=10)

    plt.show()

    # Save map as png
    if save:
        filedir = str(info.dirs.pngs)
        sm.checkDir(filedir)
        filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
        plt.savefig(os.path.join(filedir, filename), dpi=300)

    return
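# Sketch: plotting raw pings from a non-gridded file. The directory attribute
# and file name below are hypothetical; any NetCDF with ``longitude`` and
# ``latitude`` variables indexed by ``Dindex`` should work.
#
#   sm.map_dots(info, os.path.join(str(info.dirs.interpolated), 'all_data.nc'))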
def map_dots_one_ship(info, file_in, Ship_No, save=True):
    '''
    Creates a map of "pings" (i.e. not gridded density) of only one ship

    Arguments:
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the ``\merged`` directory
        Ship_No (str): Unique identifier of the ship to plot
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
    '''
    import pandas as pd
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if (info.grid.minlat == None or info.grid.maxlat == None or
            info.grid.minlon == None or info.grid.maxlon == None):
        minlat = d['lat'].values.min()
        maxlat = d['lat'].values.max()
        minlon = d['lon'].values.min()
        maxlon = d['lon'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')
    # basemap_file = str(path_to_basemap / 'basemap_spots.p')

    m = sm.make_basemap(info.dirs.project_path,
                        [minlat, maxlat, minlon, maxlon])
    # if not os.path.exists(str(path_to_basemap / 'basemap.p')):
    #     m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
    # else:
    #     print('Found basemap...')
    #     m = pickle.load(open(basemap_file,'rb'))

    indx = ((d['longitude'] > minlon) & (d['longitude'] <= maxlon) &
            (d['latitude'] > minlat) & (d['latitude'] <= maxlat))
    filtered_data = d.sel(Dindex=indx)

    ship_id = info.ship_id
    unis = pd.unique(filtered_data[ship_id].values)
    ship = unis[Ship_No]

    indxship = (filtered_data[ship_id] == ship)
    singleship = filtered_data.sel(Dindex=indxship)

    print('Ship id:' + str(ship))
    # print(singleship['longitude'].values)
    # print(singleship['latitude'].values)

    x, y = m(singleship['longitude'].values, singleship['latitude'].values)
    # x, y = m(d['longitude'].values,d['latitude'].values)
    cs = m.scatter(x, y, 2, marker='o', color='r', zorder=30)

    # fig = plt.figure()
    # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')
    # plt.show()

    # Save map as png
    if save:
        filedir = str(info.dirs.pngs)
        sm.checkDir(filedir)
        filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
        plt.savefig(os.path.join(filedir, filename), dpi=300)

    return
def define_path_to_map(info, path_to_basemap='auto'):
    '''
    Figures out where the ``.basemap`` and ``.grid`` files are

    Arguments:
        info (info): ``info`` object containing metadata
    '''
    if path_to_basemap == 'auto':
        if info.grid.type == 'one-off':
            path_to_map = os.path.join(info.dirs.project_path,
                                       info.grid.region, 'ancillary')
        elif info.grid.type == 'generic':
            path_to_map = os.path.abspath(
                os.path.join(info.dirs.project_path, 'ancillary'))
    else:
        path_to_map = path_to_basemap
    return path_to_map
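# Sketch: resolving the ancillary directory for the two grid types. A grid of
# type 'one-off' keys the path by region; 'generic' does not. The explicit
# directory in the second call is an arbitrary example.
#
#   path_to_map = define_path_to_map(info)                               # 'auto'
#   path_to_map = define_path_to_map(info, path_to_basemap='/tmp/maps')  # explicit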
def make_basemap(info, spatial, path_to_basemap='auto', sidebar=False):
    '''
    Makes a basemap

    Arguments:
        info (info): ``info`` object containing metadata
        spatial (list): List with corners... this will be deprecated soon

    Keyword arguments:
        path_to_basemap (str): Directory where to save the produced basemap.
            If ``'auto'`` then path is set up by
            :func:`~ship_mapper.mapper.define_path_to_map`
        sidebar (bool): If ``True`` space for a side panel is added to the
            basemap

    Returns:
        ``.basemap`` and ``.grid`` files
    '''
    print('Making basemap...')
    # -------------------------------------------------------------------------
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
    sm.checkDir(str(path_to_map))

    minlat = spatial[0]
    maxlat = spatial[1]
    minlon = spatial[2]
    maxlon = spatial[3]

    # Create map
    m = Basemap(projection='mill',
                llcrnrlat=minlat, urcrnrlat=maxlat,
                llcrnrlon=minlon, urcrnrlon=maxlon,
                resolution=info.maps.resolution)

    # TOPO
    # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
    # using the netCDF output option
    # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
    bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
    if not os.path.isfile(bathymetry_file):
        isub = 1
        base_url = 'http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
        query = 'topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat, isub, minlat,
                                                      minlon, isub, maxlon)
        url = base_url + query
        # store data in NetCDF file
        urllib.request.urlretrieve(url, bathymetry_file)

    # open NetCDF data in
    nc = netCDF4.Dataset(bathymetry_file)
    ncv = nc.variables
    lon = ncv['longitude'][:]
    lat = ncv['latitude'][:]
    lons, lats = np.meshgrid(lon, lat)
    topo = ncv['topo'][:, :]

    fig = plt.figure(figsize=(19, 9))
    # ax = fig.add_axes([0.05,0.05,0.80,1])
    # ax = fig.add_axes([0,0,0.80,1])
    # ax = fig.add_axes([0.23,0.035,0.85,0.9])
    if sidebar:
        ax = plt.subplot2grid((1, 24), (0, 5), colspan=19)
    else:
        ax = fig.add_axes([0.05, 0.05, 0.94, 0.94])

    TOPOmasked = np.ma.masked_where(topo > 0, topo)

    cs = m.pcolormesh(lons, lats, TOPOmasked,
                      cmap=load_my_cmap('my_cmap_lightblue'),
                      latlon=True, zorder=5)

    # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
    # m.fillcontinents(color='#E1E1A0',zorder=23)
    m.drawcoastlines(color='#a6a6a6', linewidth=0.5, zorder=25)
    m.fillcontinents(color='#e6e6e6', zorder=23)
    m.drawmapboundary()

    def setcolor(x, color):
        for m in x:
            for t in x[m][1]:
                t.set_color(color)

    parallels = np.arange(minlat, maxlat, info.maps.parallels)
    # labels = [left,right,top,bottom]
    par = m.drawparallels(parallels, labels=[True, False, False, False],
                          dashes=[20, 20], color='#00a3cc',
                          linewidth=0.2, zorder=25)
    setcolor(par, '#00a3cc')

    meridians = np.arange(minlon, maxlon, info.maps.meridians)
    mers = m.drawmeridians(meridians, labels=[False, False, False, True],
                           dashes=[20, 20], color='#00a3cc',
                           linewidth=0.2, zorder=25)
    setcolor(mers, '#00a3cc')

    ax = plt.gca()
    # ax.axhline(linewidth=4, color="#00a3cc")
    # ax.axvline(linewidth=4, color="#00a3cc")
    # ax.spines['top'].set_color('#00a3cc')
    ax.spines['right'].set_color('#00a3cc')
    ax.spines['bottom'].set_color('#00a3cc')
    ax.spines['left'].set_color('#00a3cc')
    for k, spine in ax.spines.items():  # ax.spines is a dictionary
        spine.set_zorder(35)
    # ax.spines['top'].set_visible(False)
    # ax.spines['right'].set_visible(False)
    # ax.spines['bottom'].set_visible(False)
    # ax.spines['left'].set_visible(False)

    # fig.tight_layout(pad=0.25)
    fig.tight_layout(rect=[0.01, 0.01, .99, .99])

    plt.show()

    if sidebar:
        basemap_name = 'basemap_sidebar.p'
    else:
        basemap_name = 'basemap.p'

    info.dirs.basemap = os.path.join(path_to_map, basemap_name)
    picklename = str(info.dirs.basemap)
    pickle.dump(m, open(picklename, 'wb'), -1)
    print('!!! Pickle just made: ' + picklename)

    # ## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
    # ## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) +
    # ##             ' - Filter' + str(downLim) + '-' + str(upLim) + '.png')
    # plt.savefig('test.png')
    return m
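# Sketch: building and pickling a basemap for a rectangular region. Corners
# are [minlat, maxlat, minlon, maxlon]; the values below are arbitrary
# Scotian Shelf-ish coordinates used only for illustration.
#
#   m = sm.make_basemap(info, [43.0, 46.0, -66.0, -59.0], sidebar=False)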
def load_my_cmap(name):
    '''
    Creates and loads custom colormap
    '''
    # cdict = {'red': ((0.0, 1.0, 1.0),
    #                  (1.0, 0.5, 0.5)),
    #          'green': ((0.0, 1.0, 1.0),
    #                    (1.0, 0.0, 0.0)),
    #          'blue': ((0.0, 0.0, 0.0),
    #                   (1.0, 0.0, 0.0))}
    # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap', cdict, 256)

    # cdict = {'red': ((0.0, 0.0, 0.0),
    #                  (1.0, 0.7, 0.7)),
    #          'green': ((0.0, 0.25, 0.25),
    #                    (1.0, 0.85, 0.85)),
    #          'blue': ((0.0, 0.5, 0.5),
    #                   (1.0, 1.0, 1.0))}
    # my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)

    if name == 'my_cmap_lightblue':
        cdict = {'red': ((0.0, 0.0, 0.0),     # Dark
                         (1.0, 0.9, 0.9)),    # Light
                 'green': ((0.0, 0.9, 0.9),
                           (1.0, 1.0, 1.0)),
                 'blue': ((0.0, 0.9, 0.9),
                          (1.0, 1.0, 1.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_amber2red':
        cdict = {'red': ((0.0, 1.0, 1.0),
                         (1.0, 0.5, 0.5)),
                 'green': ((0.0, 0.85, 0.85),
                           (1.0, 0.0, 0.0)),
                 'blue': ((0.0, 0.3, 0.3),
                          (1.0, 0.0, 0.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_red2black':
        # c1 = np.array([252,142,110])/256  # RGB/256
        c1 = np.array([250, 59, 59]) / 256   # RGB/256
        c2 = np.array([103, 0, 13]) / 256    # RGB/256
        cdict = {'red': ((0.0, c1[0], c1[0]),
                         (1.0, c2[0], c2[0])),
                 'green': ((0.0, c1[1], c1[1]),
                           (1.0, c2[1], c2[1])),
                 'blue': ((0.0, c1[2], c1[2]),
                          (1.0, c2[2], c2[2]))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    else:
        print('cmap name does not match any of the available cmaps')
        raise ValueError('Unknown cmap name: ' + str(name))
    return my_cmap
---------------------------------------------------------------------------- H = np.rot90(H) H = np.flipud(H)", "if name == 'my_cmap_lightblue': cdict = {'red': ((0.0, 0.0, 0.0), # Dark (1.0,", "mapper v0.1\\n\\n' + 'Created by:\\n' + 'Oceans and Coastal Management Division\\n' + 'Ecosystem", "== None: minlat = d['lat'].values.min() maxlat = d['lat'].values.max() minlon = d['lon'].values.min() maxlon =", "Canada – Maritimes Region\\n' + 'Bedford Institute of Oceanography\\n' + 'PO Box 1006,", "np # Suppress matplotlib warnings np.warnings.filterwarnings('ignore') import xarray as xr import cmocean from", "def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False): ''' Makes a basemap Arguments: info (info): ``info`` object containing", "pickle.load(open(basemap_file,'rb')) x, y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10) # plt.show() # #", "d = xr.open_dataset(file_in) # Define boundaries if info.grid.minlat == None or info.grid.maxlat ==", "\"pings\" rather than gridded density Arguments: info (info): ``info`` object containing metadata Keyword", "# Set vman and vmin print('Min: ' + str(np.min(Hmasked))) print('Max: ' + str(np.max(Hmasked)))", "cmaps') return my_cmap def save_basemap(m,info,path_to_basemap='auto'): ''' Saves basemap (and correspoding info.grid) to a", "= info.dirs.project_path / 'ancillary' print('-----------------------------------------------------') print('-----------------------------------------------------') if sidebar: basemap_file = str(path_to_basemap / 'basemap_sidebar.p')", "to save the produced basemap. If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map`", "= spatial[2] maxlon = spatial[3] # Create map m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat, llcrnrlon=minlon,", "available cmaps') return my_cmap def save_basemap(m,info,path_to_basemap='auto'): ''' Saves basemap (and correspoding info.grid) to", "fig.add_axes([0,0,0.80,1]) # ax = fig.add_axes([0.23,0.035,0.85,0.9]) if sidebar: ax = plt.subplot2grid((1,24),(0,5),colspan=19) else: ax =", "barstyle='fancy', units='km', fontsize=8, fontcolor='#808080', fillcolor1 = '#cccccc', fillcolor2 = '#a6a6a6', yoffset = (0.01*(m.ymax-m.ymin)),", "a ``.png`` figure is saved to hardrive ''' import pandas as pd print('Mapping...')", "'#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.145, text3, horizontalalignment='left', verticalalignment='top', size=7, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25,", "matplotlib.colors import LinearSegmentedColormap from mpl_toolkits.basemap import Basemap import numpy as np # Suppress", "mng.frame.Maximize(True) # # fig.tight_layout() plt.show() # Save map as png if save: if", "+ '.grid')) info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid') pickle.dump(info, open(info_picklename, 'wb'), -1) print('!!!", "if info.maps.cbarmax == 'auto': # vmax = (np.median(Hmasked)) + (4*Hmasked.std()) vmax = (np.max(Hmasked))", "not sidebar: cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60) cbar = plt.colorbar(extend='both', cax =", "= info.dirs.basemap print('Basemap file: ' + basemap_file) # Check for basemap.p and, if", "in ``grids`` directory Returns: Pickle file See also: :mod:`pickle` ''' # # basemap", "md['epsg_code'] + '\\n' + 'Interpolation: ' + md['interpolation'] + '\\n' + 'Interpolation threshold:", "'WARNING: This is a preliminary data product.\\n' + 'We cannot ​guarantee the validity,", 
"define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat = spatial[0] maxlat = spatial[1] minlon = spatial[2] maxlon", "pd print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in) # Define boundaries if info.grid.minlat ==", "cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax) #scalebar sblon = minlon + ((maxlon-minlon)/10)", "+ 'Mask below: ' + str(md['mask_below']) + ' vessels per grid' ) text3", "horizontalalignment='left', verticalalignment='top', size=7, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25, text4, style='italic', horizontalalignment='left', verticalalignment='top', size=8,", "# filedir = str(info.dirs.pngs) # sm.checkDir(filedir) # filename = info.project_name + '_' +", "file_in (str): Gridded or merged file to map. If ``None`` it looks for", "'Software: ship mapper v0.1\\n\\n' + 'Created by:\\n' + 'Oceans and Coastal Management Division\\n'", "1.0, 1.0), # (1.0, 0.5, 0.5)), # 'green': ((0.0, 1.0, 1.0), # (1.0,", "plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') # Change colorbar labels for easier interpreting label_values", "quality of this product. ​Data is provided\\n' + 'on an \"AS IS\" basis.", "0.5, 0.5), # (1.0, 1.0, 1.0))} # my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) if name ==", "+ str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\\n' + 'EPGS code: ' + md['epsg_code'] + '\\n'", "' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\\n' + 'EPGS code: ' +", "= plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') # Change colorbar labels for easier interpreting", "= fig.add_axes([0,0,0.80,1]) # ax = fig.add_axes([0.23,0.035,0.85,0.9]) if sidebar: ax = plt.subplot2grid((1,24),(0,5),colspan=19) else: ax", "0.0, 0.0), # (1.0, 0.7, 0.7)), # 'green': ((0.0, 0.25, 0.25), # (1.0,", "LinearSegmentedColormap from mpl_toolkits.basemap import Basemap import numpy as np # Suppress matplotlib warnings", "max_speed = 616.66 # m/min ...roughly 20 knots max_speed = 316.66 # m/min", "= ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n' + 'Creation script: '", "vmin = (np.median(Hmasked)) - (4*Hmasked.std()) alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'],", "#RGB/256 c1 = np.array([250,59,59])/256 #RGB/256 c2 = np.array([103,0,13])/256 #RGB/256 cdict = {'red': ((0.0,", "= (0.01*(m.ymax-m.ymin)), labelstyle='simple',zorder=60) if not sidebar: cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60) cbar", "A ``.basemap`` and a ``.grid`` files ''' print('Making basemap...') # ----------------------------------------------------------------------------- path_to_map =", "minlon) & (d['longitude']<= maxlon) & (d['latitude']> minlat) & (d['latitude']<= maxlat)) filtered_data = d.sel(Dindex=indx)", "for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units']) if sidebar: text1, text2, text3, text4", "zorder=3, facecolor='#e6e6e6', alpha=1.0, edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold', size=10, color=", "import numpy as np # Suppress matplotlib warnings np.warnings.filterwarnings('ignore') import xarray as xr", "Log H for better display Hmasked = np.log10(Hmasked) if 
vmin != None: vmin", "rather than gridded density Arguments: info (info): ``info`` object containing metadata Keyword Arguments:", "ship Arguments: info (info): ``info`` object containing metadata Keyword Arguments: file_in (str): Gridded", "/ max_speed elif info.maps.cbarmin != None: vmin = info.maps.cbarmin else: vmin = None", "'.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out where is", "elif name == 'my_cmap_amber2red': # cdict = {'red': ((0.0, 1.0, 1.0), # (1.0,", "List with corners... this will be deprecated soon Keyword arguments: path_to_basemap (str): Directory", "minutes\\n' + 'Mask below: ' + str(md['mask_below']) + ' vessels per grid' )", "str(BinNo) + ' - Filter' +str(downLim) + '-' + str(upLim) + '.png') #", "0.5), # (1.0, 1.0, 1.0))} # my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) if name == 'my_cmap_lightblue':", "info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid') pickle.dump(info, open(info_picklename, 'wb'), -1) print('!!! Pickles were", "None: vmin = np.log10(vmin) if vmax != None: vmax = np.log10(vmax) # Make", "1.0), # (1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0, 0.0), # (1.0, 0.0,", "+ str(Hmasked.std())) if info.maps.cbarmax == 'auto': # vmax = (np.median(Hmasked)) + (4*Hmasked.std()) vmax", "info.dirs.project_path / 'ancillary' print('-----------------------------------------------------') print('-----------------------------------------------------') # basemap_file = str(path_to_basemap / 'basemap_spots.p') m =", "png # if save: # filedir = str(info.dirs.pngs) # sm.checkDir(filedir) # filename =", "c1[1]), (1.0, c2[1], c2[1])), 'blue': ((0.0, c1[2], c1[2]), (1.0, c2[2], c2[2]))} my_cmap =", "open(str(path_to_map / (info.grid.basemap + '.p')),'w') # pickle.dump(grid, f) # pickle.dump(m, f) # f.close()", "name is ``info.run_name + '__' + file_in + '.png'`` filedir_out (str): Directory where", "m def load_my_cmap(name): ''' Creates and loads custom colormap ''' # cdict =", "color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25, text4, style='italic', horizontalalignment='left', verticalalignment='top', size=8, color= '#808080', transform=plt.gca().transAxes)", "f.close() # picklename = str(path_to_map / (info.grid.basemap + '.p')) # pickle.dump(basemap, open(picklename, 'wb'),", "plt.savefig('test.png') return m def load_my_cmap(name): ''' Creates and loads custom colormap ''' #", "NS, Canada, B2Y 4A2' ) text4 = ('---------------------------------------------------------------\\n' + 'WARNING: This is a", "= info.grid.minlon maxlon = info.grid.maxlon path_to_basemap = info.dirs.project_path / 'ancillary' print('-----------------------------------------------------') print('-----------------------------------------------------') if", "plt.show() if sidebar: basemap_name = 'basemap_sidebar.p' else: basemap_name = 'basemap.p' info = sm.calculate_gridcell_areas(info)", "for k, spine in ax.spines.items(): #ax.spines is a dictionary spine.set_zorder(35) # ax.spines['top'].set_visible(False) #", "0.0, 0.0), # Dark (1.0, 0.9, 0.9)), # Light 'green': ((0.0, 0.9, 0.9),", "with corners... 
this will be deprecated soon Keyword arguments: path_to_basemap (str): Directory where", "# Define boundaries if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon", "color=\"#00a3cc\") # ax.spines['top'].set_color('#00a3cc') ax.spines['right'].set_color('#00a3cc') ax.spines['bottom'].set_color('#00a3cc') ax.spines['left'].set_color('#00a3cc') for k, spine in ax.spines.items(): #ax.spines is", "Unique identifier of the ship to plot save (bool): If ``True`` a ``.png``", "bathymetry_file) # open NetCDF data in nc = netCDF4.Dataset(bathymetry_file) ncv = nc.variables lon", "/ 'basemap.p')): # m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # else: # print('Found basemap...') # m", "'__' + file_in + '.png'`` filedir_out (str): Directory where figure is saved. If", "info: ``info`` object containing metadata :return: text for legend ''' import datetime alat", "ax.spines['top'].set_color('#00a3cc') ax.spines['right'].set_color('#00a3cc') ax.spines['bottom'].set_color('#00a3cc') ax.spines['left'].set_color('#00a3cc') for k, spine in ax.spines.items(): #ax.spines is a dictionary", "singleship = filtered_data.sel(Dindex=indxship) print('Ship id:'+ str(ship)) # print(singleship['longitude'].values) # print(singleship['latitude'].values) x, y =", "0.0)), 'blue': ((0.0, 0.3, 0.3), (1.0, 0.0, 0.0))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name", "= sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed = 616.66 # m/min ...roughly 20 knots max_speed", "using a gridded (or merged) file Arguments: info (info): ``info`` object containing metadata", "save the produced basemap. If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar", "= pickle.load(open(basemap_file,'rb')) # Create grid for mapping lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values) xx,yy =", "Keyword Arguments: path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory Returns: Pickle", "np.log10(vmin) if vmax != None: vmax = np.log10(vmax) # Make colormap fig =", "filedir_out if filename_out == 'auto': filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) +", "​USE AT YOUR OWN RISK.\\n' + '---------------------------------------------------------------\\n' ) return text1, text2, text3, text4", "= pickle.load(open(basemap_file,'rb')) x, y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10) # plt.show() #", "filename_out (str): Name of produced figure. 
If ``auto`` then name is ``info.run_name +", "basemap (and correspoding info.grid) to a pickle file Arguments: m (mpl_toolkits.basemap.Basemap): Basemap object", "# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300)", "clip_on=False, boxstyle=\"square,pad=0\", zorder=3, facecolor='#e6e6e6', alpha=1.0, edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold',", "minlat = info.grid.minlat maxlat = info.grid.maxlat minlon = info.grid.minlon maxlon = info.grid.maxlon path_to_basemap", "edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold', size=10, color= '#737373', transform=plt.gca().transAxes) plt.text(0.02,", "interpreting label_values = cbar._tick_data_values # print(\"values\") # print(label_values) log_label_values = np.round(10 ** label_values,decimals=0)", "'Fisheries and Oceans Canada – Maritimes Region\\n' + 'Bedford Institute of Oceanography\\n' +", "space for a side panel is added to the basemap Returns: A ``.basemap``", "dictionary spine.set_zorder(35) # ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99])", "= cellsize / max_speed elif info.maps.cbarmin != None: vmin = info.maps.cbarmin else: vmin", "filedir = str(info.dirs.pngs) else: filedir = filedir_out if filename_out == 'auto': filename =", "# print(info) # -------------------------------------------------------- text2 = ('Unit description: ' + md['unit_description'] + '\\n\\n'", "20 knots max_speed = 316.66 # m/min ...roughly 20 knots vmin = cellsize", "file_in, Ship_No, save=True): ''' Creates a map of \"pings\" (i.e. not gridded density)", "does not match any of the available cmaps') return my_cmap def save_basemap(m,info,path_to_basemap='auto'): '''", "the `\\merged` directory cmap (str): Colormap to use sidebar (bool): If ``True``, includes", "info.maps.cbarmin else: vmin = None # Log H for better display Hmasked =", "a preliminary data product.\\n' + 'We cannot ​guarantee the validity, accuracy, \\n' +", "basis. ​USE AT YOUR OWN RISK.\\n' + '---------------------------------------------------------------\\n' ) return text1, text2, text3,", "Returns: Pickle file See also: :mod:`pickle` ''' # # basemap = [grid, m]", "vmin = None # Log H for better display Hmasked = np.log10(Hmasked) if", "= ((d['longitude']> minlon) & (d['longitude']<= maxlon) & (d['latitude']> minlat) & (d['latitude']<= maxlat)) filtered_data", "a ``.png`` figure is saved to hardrive filename_out (str): Name of produced figure.", "info info: ``info`` object containing metadata :return: text for legend ''' import datetime", "lats_grid = np.meshgrid(d['lon'].values,d['lat'].values) xx,yy = m(lons_grid, lats_grid) H = d['ship_density'].values # Rotate and", "RISK.\\n' + '---------------------------------------------------------------\\n' ) return text1, text2, text3, text4 def map_dots(info, file_in, sidebar=False,", "not match any of the available cmaps') return my_cmap def save_basemap(m,info,path_to_basemap='auto'): ''' Saves", "labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') # TODO: maybe delete this? 
# mng =", "else: filedir = filedir_out if filename_out == 'auto': filename = info.run_name + '__'", "of produced figure. If ``auto`` then name is ``info.run_name + '__' + file_in", "= 'C:\\\\Users\\\\IbarraD\\\\Documents\\\\VMS\\\\png\\\\' ## plt.savefig(datadir[0:-5] + 'png\\\\' + filename + '- Grid' + str(BinNo)", "open(basemap_picklename, 'wb'), -1) # info_picklename = str(path_to_map / (info.grid.basemap + '.grid')) info_picklename =", "'.basemap')) basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m, open(basemap_picklename, 'wb'), -1) # info_picklename =", "vmin = np.log10(vmin) if vmax != None: vmax = np.log10(vmax) # Make colormap", "# pickle.dump(m,open(picklename,'wb'),-1) # print('!!! Pickle just made: ' + picklename) # ## pngDir", "picklename) # ## pngDir = 'C:\\\\Users\\\\IbarraD\\\\Documents\\\\VMS\\\\png\\\\' ## plt.savefig(datadir[0:-5] + 'png\\\\' + filename +", "``info`` object containing metadata ''' if path_to_basemap == 'auto': if info.grid.type == 'one-off':", "picklename = str(path_to_map / (info.grid.basemap + '.p')) # pickle.dump(basemap, open(picklename, 'wb'), -1) #", "'.py\\n' + 'Software: ship mapper v0.1\\n\\n' + 'Created by:\\n' + 'Oceans and Coastal", "return m def load_my_cmap(name): ''' Creates and loads custom colormap ''' # cdict", "info.grid.basemap + '.grid') pickle.dump(info, open(info_picklename, 'wb'), -1) print('!!! Pickles were just made: '", "'red2black': cmapcolor = load_my_cmap('my_cmap_red2black') else: cmapcolor =plt.get_cmap(cmap) cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin,", "zeros d.attrs['mask_below'] = info.maps.mask_below Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H) # Set vman and vmin print('Min:", "+ md['unit_description'] + '\\n\\n' + 'Data source: ' + md['data_source'] + '\\n\\n' +", "= m(lons_grid, lats_grid) H = d['ship_density'].values # Rotate and flip H... ---------------------------------------------------------------------------- H", "corners... this will be deprecated soon Keyword arguments: path_to_basemap (str): Directory where to", "((0.0, 0.0, 0.0), # (1.0, 0.0, 0.0))} # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict =", "= 'basemap_sidebar.p' else: basemap_name = 'basemap.p' info = sm.calculate_gridcell_areas(info) # Save basemap save_basemap(m,info,path_to_basemap=path_to_map)", "directory sidebar (bool): If ``True``, includes side panel with metadata save (bool): If", "(info.grid.basemap + '.p')) # pickle.dump(basemap, open(picklename, 'wb'), -1) # print('!!! Pickle just made:", "# Save map as png # if save: # filedir = str(info.dirs.pngs) #", "cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') # TODO: maybe delete this? 
# mng = plt.get_current_fig_manager()", "# # Save map as png # if save: # filedir = str(info.dirs.pngs)", "max_speed = 316.66 # m/min ...roughly 20 knots vmin = cellsize / max_speed", "vmin != None: vmin = np.log10(vmin) if vmax != None: vmax = np.log10(vmax)", "------------------------------------------------------') # Load data if file_in == None: file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc') print(file_in) d", "alat = (md['maxlat'] - md['minlat'])/2 text1 = 'VESSEL DENSITY HEATMAP' # print(info) #", "map m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat, llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution) # TOPO # Read data from:", "linewidth=0.2, zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax", "d['lon'].values.max() else: minlat = d.attrs['minlat'] maxlat = d.attrs['maxlat'] minlon = d.attrs['minlon'] maxlon =", "+ ' to ' + md['enddate'][0:-3] + '\\n\\n' + 'Included speeds: ' +", "---------------------------------------------------------------------------- H = np.rot90(H) H = np.flipud(H) # Mask zeros d.attrs['mask_below'] = info.maps.mask_below", "LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0, 1.0, 1.0), (1.0, 0.5, 0.5)), 'green': ((0.0, 0.85,", "path_to_basemap (str): Directory where to save the produced basemap. If ``'auto'`` then path", "Arguments: file_in (str): Gridded or merged file to map. If ``None`` it looks", "0.5, 0.5)), 'green': ((0.0, 0.85, 0.85), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.3, 0.3),", "str(info.dirs.pngs) # sm.checkDir(filedir) # filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'", "sblat, minlon, minlat, info.maps.scalebar_km, barstyle='fancy', units='km', fontsize=8, fontcolor='#808080', fillcolor1 = '#cccccc', fillcolor2 =", "figure is saved to hardrive ''' import pandas as pd print('Mapping...') # -----------------------------------------------------------------------------", "= info.grid.maxlat minlon = info.grid.minlon maxlon = info.grid.maxlon path_to_basemap = info.dirs.project_path / 'ancillary'", "Save map as png # if save: # filedir = str(info.dirs.pngs) # sm.checkDir(filedir)", "indx = ((d['longitude']> minlon) & (d['longitude']<= maxlon) & (d['latitude']> minlat) & (d['latitude']<= maxlat))", "# m = pickle.load(open(basemap_file,'rb')) indx = ((d['longitude']> minlon) & (d['longitude']<= maxlon) & (d['latitude']>", "nc = netCDF4.Dataset(bathymetry_file) ncv = nc.variables lon = ncv['longitude'][:] lat = ncv['latitude'][:] lons,", "+ md['data_description'] + '\\n\\n' + 'Time range: \\n' + md['startdate'][0:-3] + ' to", "import netCDF4 def map_density(info, file_in=None, cmap='Default', sidebar=False, to_screen=True, save=True, filename_out='auto',filedir_out='auto'): ''' Plots a", "``auto`` then output directory is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density ------------------------------------------------------') #", "# Check for basemap.p and, if doesn;t exist, make it if not os.path.exists(basemap_file):", "# # basemap = [grid, m] # f = open(str(path_to_map / (info.grid.basemap +", "# m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x, color): for m in x: for", "match any of the available cmaps') return 
my_cmap def save_basemap(m,info,path_to_basemap='auto'): ''' Saves basemap", "+ '.grid') pickle.dump(info, open(info_picklename, 'wb'), -1) print('!!! Pickles were just made: ' +", "will be deprecated soon Keyword arguments: path_to_basemap (str): Directory where to save the", "= filtered_data.sel(Dindex=indxship) print('Ship id:'+ str(ship)) # print(singleship['longitude'].values) # print(singleship['latitude'].values) x, y = m(singleship['longitude'].values,singleship['latitude'].values)", "Region\\n' + 'Bedford Institute of Oceanography\\n' + 'PO Box 1006, Dartmouth, NS, Canada,", "316.66 # m/min ...roughly 20 knots vmin = cellsize / max_speed elif info.maps.cbarmin", "'my_cmap_amber2red': # cdict = {'red': ((0.0, 1.0, 1.0), # (1.0, 0.5, 0.5)), #", "= d['lon'].values.max() else: minlat = info.grid.minlat maxlat = info.grid.maxlat minlon = info.grid.minlon maxlon", "matplotlib warnings np.warnings.filterwarnings('ignore') import xarray as xr import cmocean from pathlib import Path", "'usgsCeSrtm30v6.nc') bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc') if not os.path.isfile(bathymetry_file): isub = 1 base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?' query='topo[(%f):%d:(%f)][(%f):%d:(%f)]'", "text4 = make_legend_text(info,d.attrs) ax2 = plt.subplot2grid((1,24),(0,0),colspan=4) # Turn off tick labels ax2.get_xaxis().set_visible(False) ax2.get_yaxis().set_visible(False)", "Save basemap save_basemap(m,info,path_to_basemap=path_to_map) # picklename = str(path_to_map / basemap_name) # pickle.dump(m,open(picklename,'wb'),-1) # print('!!!", "file to map. If ``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory", "# plt.savefig('test.png') return m def load_my_cmap(name): ''' Creates and loads custom colormap '''", "= m(singleship['longitude'].values,singleship['latitude'].values) # x, y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,2,marker='o',color='r', zorder=30) # fig", "None: vmin = info.maps.cbarmin else: vmin = None # Log H for better", "else: filename = filename_out sm.checkDir(filedir) plt.savefig(os.path.join(filedir,filename), dpi=300) # Close netCDF file d.close() if", "np.round(10 ** label_values,decimals=0) labels = [] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'])", "# m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # else: # print('Found basemap...') # m = pickle.load(open(basemap_file,'rb'))", "'blue': ((0.0, c1[2], c1[2]), (1.0, c2[2], c2[2]))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) else: print('cmap name", "str(np.min(Hmasked))) print('Max: ' + str(np.max(Hmasked))) print('Mean: ' + str(np.nanmean(Hmasked))) print('Std: ' + str(Hmasked.std()))", "info_picklename = str(path_to_map / (info.grid.basemap + '.grid')) info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')", "\\n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\\n\\n' + 'Included", "figure is saved to hardrive ''' print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in) #", "IS\" basis. 
​USE AT YOUR OWN RISK.\\n' + '---------------------------------------------------------------\\n' ) return text1, text2,", "save (bool): If ``True`` a ``.png`` figure is saved to hardrive filename_out (str):", "+ filename + '- Grid' + str(BinNo) + ' - Filter' +str(downLim) +", "`\\merged` directory Ship_No (str): Unique identifier of the ship to plot save (bool):", "print(label_values) log_label_values = np.round(10 ** label_values,decimals=0) # print(log_label_values) labels = [] for log_label_value", "units='km', fontsize=8, fontcolor='#808080', fillcolor1 = '#cccccc', fillcolor2 = '#a6a6a6', yoffset = (0.01*(m.ymax-m.ymin)), labelstyle='simple',zorder=60)", "# 'blue': ((0.0, 0.5, 0.5), # (1.0, 1.0, 1.0))} # my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)", "screen save (bool): If ``True`` a ``.png`` figure is saved to hardrive filename_out", "# using the netCDF output option # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc') bathymetry_file", "= '#a6a6a6', yoffset = (0.01*(m.ymax-m.ymin)), labelstyle='simple',zorder=60) if not sidebar: cbaxes2 = fig.add_axes([0.70, 0.18,", "looks for ``merged_grid.nc`` in the `\\merged` directory Ship_No (str): Unique identifier of the", "Filter' +str(downLim) + '-' + str(upLim) + '.png') # plt.savefig('test.png') return m def", "from matplotlib.colors import LinearSegmentedColormap from mpl_toolkits.basemap import Basemap import numpy as np #", "cdict = {'red': ((0.0, 1.0, 1.0), (1.0, 0.5, 0.5)), 'green': ((0.0, 0.85, 0.85),", "'EPGS code: ' + md['epsg_code'] + '\\n' + 'Interpolation: ' + md['interpolation'] +", "0.0)), # 'blue': ((0.0, 0.0, 0.0), # (1.0, 0.0, 0.0))} # my_cmap_yellow2red =", "'ancillary' print('-----------------------------------------------------') print('-----------------------------------------------------') # basemap_file = str(path_to_basemap / 'basemap_spots.p') m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) #", "(i.e. not gridded density) of only one ship Arguments: info (info): ``info`` object", "``info`` object containing metadata Keyword Arguments: file_in (str): Gridded or merged file to", "picklename) path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) # basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))", "(1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0, 0.0), # (1.0, 0.0, 0.0))} #", "directory cmap (str): Colormap to use sidebar (bool): If ``True``, includes side panel", "is saved to hardrive filename_out (str): Name of produced figure. 
If ``auto`` then", "+ 'WARNING: This is a preliminary data product.\\n' + 'We cannot ​guarantee the", "= d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat = d.attrs['minlat'] maxlat = d.attrs['maxlat'] minlon", "((0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])), 'blue': ((0.0, c1[2], c1[2]), (1.0, c2[2], c2[2]))}", "fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60) cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') # Change", "than gridded density Arguments: info (info): ``info`` object containing metadata Keyword Arguments: file_in", "fig.add_axes([0.05,0.05,0.80,1]) # ax = fig.add_axes([0,0,0.80,1]) # ax = fig.add_axes([0.23,0.035,0.85,0.9]) if sidebar: ax =", "directory Returns: Pickle file See also: :mod:`pickle` ''' # # basemap = [grid,", "in the `\\merged` directory cmap (str): Colormap to use sidebar (bool): If ``True``,", "basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m, open(basemap_picklename, 'wb'), -1) # info_picklename = str(path_to_map", "0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold', size=10, color= '#737373', transform=plt.gca().transAxes) plt.text(0.02, 0.83, text2, horizontalalignment='left',", "'basemap_sidebar.p' else: basemap_name = 'basemap.p' info = sm.calculate_gridcell_areas(info) # Save basemap save_basemap(m,info,path_to_basemap=path_to_map) #", "+ '.png') # plt.savefig('test.png') return m def load_my_cmap(name): ''' Creates and loads custom", "(1.0, c2[1], c2[1])), 'blue': ((0.0, c1[2], c1[2]), (1.0, c2[2], c2[2]))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)", "Rotate and flip H... ---------------------------------------------------------------------------- H = np.rot90(H) H = np.flipud(H) # Mask", "mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax = plt.gca() # ax.axhline(linewidth=4, color=\"#00a3cc\") #", "``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool): If ``True`` space for", "for basemap.p and, if doesn;t exist, make it if not os.path.exists(basemap_file): m =", "** label_values,decimals=0) # print(log_label_values) labels = [] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels)", "not gridded density) of only one ship Arguments: info (info): ``info`` object containing", "1 base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?' 
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon) url = base_url+query # store data in NetCDF", "' to ' + md['enddate'][0:-3] + '\\n\\n' + 'Included speeds: ' + info.sidebar.included_speeds", "= cbaxes2, orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080') # Change colorbar labels for easier interpreting label_values", "= minlat + ((maxlat-minlat)/20) m.drawmapscale(sblon, sblat, minlon, minlat, info.maps.scalebar_km, barstyle='fancy', units='km', fontsize=8, fontcolor='#808080',", "for t in x[m][1]: t.set_color(color) parallels = np.arange(minlat,maxlat,info.maps.parallels) # labels = [left,right,top,bottom] par", "maxlat = d.attrs['maxlat'] minlon = d.attrs['minlon'] maxlon = d.attrs['maxlon'] basemap_file = info.dirs.basemap print('Basemap", "+ '\\n' + 'Interpolation: ' + md['interpolation'] + '\\n' + 'Interpolation threshold: '", "info.sidebar.included_vessel_types + '\\n\\n' + 'Grid size: ' + str(md['bin_size']) + ' degrees (~'", "a ``.grid`` files ''' print('Making basemap...') # ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map))", "= LinearSegmentedColormap('my_colormap',cdict,256) else: print('cmap name does not match any of the available cmaps')", "+ md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\\n\\n' + 'Included speeds:", "plt.gcf() ax = plt.gca() if cmap == 'Default': cmapcolor = load_my_cmap('my_cmap_amber2red') elif cmap", "= info.grid.minlat maxlat = info.grid.maxlat minlon = info.grid.minlon maxlon = info.grid.maxlon path_to_basemap =", "+ '\\n\\n' + 'Time range: \\n' + md['startdate'][0:-3] + ' to ' +", ".grid files Arguments: info (info): ``info`` object containing metadata ''' if path_to_basemap ==", "containing metadata Keyword Arguments: path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory", "minlon = d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat = info.grid.minlat maxlat = info.grid.maxlat", "block of map :param info info: ``info`` object containing metadata :return: text for", "np.ma.masked_where(topo>0,topo) cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5) # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25) # m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x,", "print('cmap name does not match any of the available cmaps') return my_cmap def", "hardrive ''' import pandas as pd print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in) #", "ship to plot save (bool): If ``True`` a ``.png`` figure is saved to", "np.ma.masked_where(H<=d.attrs['mask_below'],H) # Set vman and vmin print('Min: ' + str(np.min(Hmasked))) print('Max: ' +", "print('Basemap file: ' + basemap_file) # Check for basemap.p and, if doesn;t exist,", "(1.0, 0.0, 0.0))} # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0, 1.0, 1.0),", "# Close netCDF file d.close() if to_screen == False: plt.close() return def make_legend_text(info,md):", "# plt.show() # # Save map as png # if save: # filedir", "#ax.spines is a dictionary spine.set_zorder(35) # ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # 
ax.spines['left'].set_visible(False)", "or info.grid.minlon == None or info.grid.maxlon == None: minlat = d['lat'].values.min() maxlat =", "t in x[m][1]: t.set_color(color) parallels = np.arange(minlat,maxlat,info.maps.parallels) # labels = [left,right,top,bottom] par =", "'Mask below: ' + str(md['mask_below']) + ' vessels per grid' ) text3 =", "code: ' + md['epsg_code'] + '\\n' + 'Interpolation: ' + md['interpolation'] + '\\n'", "' + md['interpolation'] + '\\n' + 'Interpolation threshold: ' + str(md['interp_threshold']) + '", "# Mask zeros d.attrs['mask_below'] = info.maps.mask_below Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H) # Set vman and", "out where is the .basemap and .grid files Arguments: info (info): ``info`` object", "plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out where is the .basemap", "'#737373', transform=plt.gca().transAxes) plt.text(0.02, 0.83, text2, horizontalalignment='left', verticalalignment='top', size=9, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.145,", "elif info.maps.cbarmax != None: vmax = info.maps.cbarmax else: vmax = None if info.maps.cbarmin", "str(path_to_map / (info.grid.basemap + '.p')) # pickle.dump(basemap, open(picklename, 'wb'), -1) # print('!!! Pickle", "info.grid.minlat maxlat = info.grid.maxlat minlon = info.grid.minlon maxlon = info.grid.maxlon path_to_basemap = info.dirs.project_path", "np.array([103,0,13])/256 #RGB/256 cdict = {'red': ((0.0, c1[0], c1[0]), (1.0, c2[0], c2[0])), 'green': ((0.0,", "cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5) # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25) # m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x, color):", "a gridded (or merged) file Arguments: info (info): ``info`` object containing metadata Keyword", "'auto': filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png' else: filename =", "info (info): ``info`` object containing metadata Keyword Arguments: path_to_basemap (str): If ``'auto'`` it", "pathlib import Path import _pickle as pickle import os import ship_mapper as sm", "= cbar._tick_data_values # print(\"values\") # print(label_values) log_label_values = np.round(10 ** label_values,decimals=0) # print(log_label_values)", "= np.array([252,142,110])/256 #RGB/256 c1 = np.array([250,59,59])/256 #RGB/256 c2 = np.array([103,0,13])/256 #RGB/256 cdict =", "# f = open(str(path_to_map / (info.grid.basemap + '.p')),'w') # pickle.dump(grid, f) # pickle.dump(m,", "cdict = {'red': ((0.0, c1[0], c1[0]), (1.0, c2[0], c2[0])), 'green': ((0.0, c1[1], c1[1]),", "files ''' print('Making basemap...') # ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat =", "# 'green': ((0.0, 1.0, 1.0), # (1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0,", "[] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') # TODO: maybe", "= open(str(path_to_map / (info.grid.basemap + '.p')),'w') # pickle.dump(grid, f) # pickle.dump(m, f) #", "import Path import _pickle as pickle import os import ship_mapper as sm import", "basemap_file) # Check for basemap.p and, 
if doesn;t exist, make it if not", "my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name == 'my_cmap_amber2red': # cdict = {'red': ((0.0, 1.0,", "# Change colorbar labels for easier interpreting label_values = cbar._tick_data_values log_label_values = np.round(10", "this? # mng = plt.get_current_fig_manager() # mng.frame.Maximize(True) # # fig.tight_layout() plt.show() # Save", "looks for ``merged_grid.nc`` in the `\\merged` directory cmap (str): Colormap to use sidebar", "cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080') # Change colorbar labels", "If ``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory Ship_No (str): Unique", "/ basemap_name) # pickle.dump(m,open(picklename,'wb'),-1) # print('!!! Pickle just made: ' + picklename) #", "containing metadata spatial (list): List with corners... this will be deprecated soon Keyword", "import datetime alat = (md['maxlat'] - md['minlat'])/2 text1 = 'VESSEL DENSITY HEATMAP' #", "+ 'Data source: ' + md['data_source'] + '\\n\\n' + 'Data source description:\\n' +", "None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:", "path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) # basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap')) basemap_picklename", "'Data source description:\\n' + md['data_description'] + '\\n\\n' + 'Time range: \\n' + md['startdate'][0:-3]", "m] # f = open(str(path_to_map / (info.grid.basemap + '.p')),'w') # pickle.dump(grid, f) #", "== None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon ==", "+ ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\\n' + 'EPGS code: '", "# ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show() if", "Ship_No, save=True): ''' Creates a map of \"pings\" (i.e. not gridded density) of", "Check for basemap.p and, if doesn;t exist, make it if not os.path.exists(basemap_file): m", "'green': ((0.0, 1.0, 1.0), # (1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0, 0.0),", "# Create grid for mapping lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values) xx,yy = m(lons_grid, lats_grid)", "f) # f.close() # picklename = str(path_to_map / (info.grid.basemap + '.p')) # pickle.dump(basemap,", "= d.attrs['maxlon'] basemap_file = info.dirs.basemap print('Basemap file: ' + basemap_file) # Check for", "filedir = filedir_out if filename_out == 'auto': filename = info.run_name + '__' +", "0.0), # Dark (1.0, 0.9, 0.9)), # Light 'green': ((0.0, 0.9, 0.9), (1.0,", "``.basemap`` and a ``.grid`` files ''' print('Making basemap...') # ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info,", "sidebar (bool): If ``True``, includes side panel with metadata save (bool): If ``True``", "+ 'png\\\\' + filename + '- Grid' + str(BinNo) + ' - Filter'", "topo = ncv['topo'][:,:] # fig = plt.figure(figsize=(19,9)) # ax = fig.add_axes([0.05,0.05,0.80,1]) # ax", "text1, text2, text3, text4 = make_legend_text(info,d.attrs) ax2 = plt.subplot2grid((1,24),(0,0),colspan=4) # Turn off tick", "filename_out == 'auto': filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png' else:", "​Data is provided\\n' + 'on an \"AS IS\" basis. 
​USE AT YOUR OWN", "description:\\n' + md['data_description'] + '\\n\\n' + 'Time range: \\n' + md['startdate'][0:-3] + '", "d['lat'].values.min() maxlat = d['lat'].values.max() minlon = d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat =", "+ str(md['bin_size']) + ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\\n' + 'EPGS", "lon = ncv['longitude'][:] lat = ncv['latitude'][:] lons, lats = np.meshgrid(lon,lat) topo = ncv['topo'][:,:]", "info.grid.type == 'one-off': path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary') elif info.grid.type == 'generic': path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary'))", "= sm.calculate_gridcell_areas(info) # Save basemap save_basemap(m,info,path_to_basemap=path_to_map) # picklename = str(path_to_map / basemap_name) #", "0.9, 0.15, 0.02],zorder=60) cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080') #", "to hardrive ''' import pandas as pd print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in)", "Change colorbar labels for easier interpreting label_values = cbar._tick_data_values # print(\"values\") # print(label_values)", "maxlon = d['lon'].values.max() else: minlat = info.grid.minlat maxlat = info.grid.maxlat minlon = info.grid.minlon", "(1.0, 0.5, 0.5)), # 'green': ((0.0, 1.0, 1.0), # (1.0, 0.0, 0.0)), #", "(1.0, c2[0], c2[0])), 'green': ((0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])), 'blue': ((0.0, c1[2],", "LinearSegmentedColormap('my_colormap',cdict,256) elif name == 'my_cmap_red2black': # c1 = np.array([252,142,110])/256 #RGB/256 c1 = np.array([250,59,59])/256", "'generic': path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary')) else: path_to_map = path_to_basemap return path_to_map def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False):", "as pickle import os import ship_mapper as sm import urllib.request import netCDF4 def", "easier interpreting label_values = cbar._tick_data_values log_label_values = np.round(10 ** label_values,decimals=0) labels = []", "d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed = 616.66 # m/min ...roughly 20", "legend in left block of map :param info info: ``info`` object containing metadata", "os.path.exists(basemap_file): m = sm.make_basemap(info,info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) else: print('Found basemap...') m = pickle.load(open(basemap_file,'rb')) # Create grid", "it looks in ``grids`` directory Returns: Pickle file See also: :mod:`pickle` ''' #", "'Interpolation: ' + md['interpolation'] + '\\n' + 'Interpolation threshold: ' + str(md['interp_threshold']) +", "None: minlat = d['lat'].values.min() maxlat = d['lat'].values.max() minlon = d['lon'].values.min() maxlon = d['lon'].values.max()", "# basemap = [grid, m] # f = open(str(path_to_map / (info.grid.basemap + '.p')),'w')", "directory is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density ------------------------------------------------------') # Load data if", "= str(path_to_map / basemap_name) # pickle.dump(m,open(picklename,'wb'),-1) # print('!!! 
Pickle just made: ' +", "plt.text(0.02, 0.145, text3, horizontalalignment='left', verticalalignment='top', size=7, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25, text4, style='italic',", "m.drawmapboundary() def setcolor(x, color): for m in x: for t in x[m][1]: t.set_color(color)", "If ``True`` a ``.png`` figure is saved to hardrive filename_out (str): Name of", "isub = 1 base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?' query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon) url = base_url+query # store data", "+ str(md['interp_threshold']) + ' knots\\n' + 'Time bin: ' + str(round(md['time_bin']*1440,1)) + '", "netCDF file d.close() if to_screen == False: plt.close() return def make_legend_text(info,md): ''' Makes", "is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density ------------------------------------------------------') # Load data if file_in", "width=1, height=1, clip_on=False, boxstyle=\"square,pad=0\", zorder=3, facecolor='#e6e6e6', alpha=1.0, edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top',", "urcrnrlon=maxlon,resolution=info.maps.resolution) # TOPO # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using the netCDF output", "sidebar: basemap_file = str(path_to_basemap / 'basemap_sidebar.p') else: basemap_file = str(path_to_basemap / 'basemap.p') if", "be deprecated soon Keyword arguments: path_to_basemap (str): Directory where to save the produced", "' + md['epsg_code'] + '\\n' + 'Interpolation: ' + md['interpolation'] + '\\n' +", "basemap...') # ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat = spatial[0] maxlat =", "== None: file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc') print(file_in) d = xr.open_dataset(file_in) # Define boundaries if", "zorder=10) # plt.show() # # Save map as png # if save: #", "label_values,decimals=0) labels = [] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units']) if sidebar:", "f = open(str(path_to_map / (info.grid.basemap + '.p')),'w') # pickle.dump(grid, f) # pickle.dump(m, f)", "# ax = fig.add_axes([0.23,0.035,0.85,0.9]) if sidebar: ax = plt.subplot2grid((1,24),(0,5),colspan=19) else: ax = fig.add_axes([0.05,0.05,0.94,0.94])", "''' Figures out where is the .basemap and .grid files Arguments: info (info):", "for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') # TODO: maybe delete", "'_' + str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def map_dots_one_ship(info, file_in, Ship_No,", "else: vmin = None # Log H for better display Hmasked = np.log10(Hmasked)", "AT YOUR OWN RISK.\\n' + '---------------------------------------------------------------\\n' ) return text1, text2, text3, text4 def", "# fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show() if sidebar: basemap_name = 'basemap_sidebar.p' else: basemap_name = 'basemap.p'", "minlat + ((maxlat-minlat)/20) m.drawmapscale(sblon, sblat, minlon, minlat, info.maps.scalebar_km, barstyle='fancy', units='km', fontsize=8, fontcolor='#808080', fillcolor1", "unis[Ship_No] 
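# -----------------------------------------------------------------------------
# Quick sanity check of the arithmetic used above, with made-up values:
# * map_density plots log10(density), so a colorbar tick of 2.0 is relabelled
#   as str(int(np.round(10 ** 2.0))) = '100' vessels.
# * make_legend_text reports md['time_bin'] (in days) in minutes: a bin of
#   1/96 day prints as round((1/96) * 1440, 1) = 15.0 minutes; a bin_size of
#   0.05 degrees at alat = 45 comes out near 3900 m, assuming
#   sm.degrees_to_meters applies the zonal conversion
#   0.05 * 111320 * cos(45 deg) ~= 3936.
# -----------------------------------------------------------------------------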
def map_dots_one_ship(info, file_in, Ship_No, save=True):
    '''
    Creates a map of "pings" (i.e. not gridded density) of only one ship

    Arguments:
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the ``\merged`` directory
        Ship_No (str): Unique identifier of the ship to plot
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
    '''
    import pandas as pd
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if (info.grid.minlat == None or info.grid.maxlat == None or
            info.grid.minlon == None or info.grid.maxlon == None):
        minlat = d['lat'].values.min()
        maxlat = d['lat'].values.max()
        minlon = d['lon'].values.min()
        maxlon = d['lon'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')
    # basemap_file = str(path_to_basemap / 'basemap_spots.p')
    m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])
    # if not os.path.exists(str(path_to_basemap / 'basemap.p')):
    #     m = sm.make_basemap(info.dirs.project_path, [minlat, maxlat, minlon, maxlon])
    # else:
    #     print('Found basemap...')
    #     m = pickle.load(open(basemap_file, 'rb'))

    # Keep only pings inside the map boundaries
    indx = ((d['longitude'] > minlon) &
            (d['longitude'] <= maxlon) &
            (d['latitude'] > minlat) &
            (d['latitude'] <= maxlat))
    filtered_data = d.sel(Dindex=indx)

    # Pick the requested ship out of the unique ship ids
    ship_id = info.ship_id
    unis = pd.unique(filtered_data[ship_id].values)
    ship = unis[Ship_No]

    indxship = (filtered_data[ship_id] == ship)
    singleship = filtered_data.sel(Dindex=indxship)

    print('Ship id:' + str(ship))
    # print(singleship['longitude'].values)
    # print(singleship['latitude'].values)

    x, y = m(singleship['longitude'].values, singleship['latitude'].values)
    # x, y = m(d['longitude'].values, d['latitude'].values)
    cs = m.scatter(x, y, 2, marker='o', color='r', zorder=30)

    # plt.plot(filtered_data['longitude'].values, filtered_data['latitude'].values, '.')
    # plt.show()

    # Save map as png
    # if save:
    #     filedir = str(info.dirs.pngs)
    #     sm.checkDir(filedir)
    #     filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
    #     plt.savefig(os.path.join(filedir, filename), dpi=300)
    return


def define_path_to_map(info, path_to_basemap='auto'):
    '''
    Figures out where the ``.basemap`` and ``.grid`` files are

    Arguments:
        info (info): ``info`` object containing metadata
    '''
    if path_to_basemap == 'auto':
        if info.grid.type == 'one-off':
            path_to_map = os.path.join(info.dirs.project_path,
                                       info.grid.region, 'ancillary')
        elif info.grid.type == 'generic':
            path_to_map = os.path.abspath(
                os.path.join(info.dirs.project_path, 'ancillary'))
    else:
        path_to_map = path_to_basemap
    return path_to_map


def make_basemap(info, spatial, path_to_basemap='auto', sidebar=False):
    '''
    Makes a basemap

    Arguments:
        info (info): ``info`` object containing metadata
        spatial (list): List with corners... this will be deprecated soon

    Keyword arguments:
        path_to_basemap (str): Directory where to save the produced basemap.
            If ``'auto'`` then path is setup by
            :func:`~ship_mapper.mapper.define_path_to_map`
        sidebar (bool): If ``True`` space for a side panel is added to the
            basemap

    Returns:
        Basemap object (``.basemap`` and ``.grid`` pickle files are written
        as a side effect)
    '''
    print('Making basemap...')
    # -------------------------------------------------------------------------
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)

    sm.checkDir(str(path_to_map))

    minlat = spatial[0]
    maxlat = spatial[1]
    minlon = spatial[2]
    maxlon = spatial[3]

    # Create map
    m = Basemap(projection='mill', llcrnrlat=minlat, urcrnrlat=maxlat,
                llcrnrlon=minlon, urcrnrlon=maxlon,
                resolution=info.maps.resolution)

    # TOPO
    # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
    # using the netCDF output option
    # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
    bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
    if not os.path.isfile(bathymetry_file):
        isub = 1
        base_url = 'http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
        query = 'topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat, isub, minlat,
                                                      minlon, isub, maxlon)
        url = base_url + query
        # Store data in NetCDF file
        urllib.request.urlretrieve(url, bathymetry_file)

    # Open the NetCDF data
    nc = netCDF4.Dataset(bathymetry_file)
    ncv = nc.variables
    lon = ncv['longitude'][:]
    lat = ncv['latitude'][:]
    lons, lats = np.meshgrid(lon, lat)
    topo = ncv['topo'][:, :]

    # Create the figure; the axes below are attached to it
    fig = plt.figure(figsize=(19, 9))
    # ax = fig.add_axes([0.05,0.05,0.80,1])
    # ax = fig.add_axes([0,0,0.80,1])
    # ax = fig.add_axes([0.23,0.035,0.85,0.9])
    if sidebar:
        ax = plt.subplot2grid((1, 24), (0, 5), colspan=19)
    else:
        ax = fig.add_axes([0.05, 0.05, 0.94, 0.94])

    TOPOmasked = np.ma.masked_where(topo > 0, topo)

    cs = m.pcolormesh(lons, lats, TOPOmasked,
                      cmap=load_my_cmap('my_cmap_lightblue'),
                      latlon=True, zorder=5)

    # m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
    # m.fillcontinents(color='#E1E1A0',zorder=23)
    m.drawcoastlines(color='#a6a6a6', linewidth=0.5, zorder=25)
    m.fillcontinents(color='#e6e6e6', zorder=23)
    m.drawmapboundary()

    def setcolor(x, color):
        for m in x:
            for t in x[m][1]:
                t.set_color(color)

    parallels = np.arange(minlat, maxlat, info.maps.parallels)
    # labels = [left,right,top,bottom]
    par = m.drawparallels(parallels, labels=[True, False, False, False],
                          dashes=[20, 20], color='#00a3cc',
                          linewidth=0.2, zorder=25)
    setcolor(par, '#00a3cc')

    meridians = np.arange(minlon, maxlon, info.maps.meridians)
    mers = m.drawmeridians(meridians, labels=[False, False, False, True],
                           dashes=[20, 20], color='#00a3cc',
                           linewidth=0.2, zorder=25)
    setcolor(mers, '#00a3cc')

    ax = plt.gca()
    # ax.axhline(linewidth=4, color="#00a3cc")
    # ax.axvline(linewidth=4, color="#00a3cc")

    ax.spines['top'].set_color('#00a3cc')
    ax.spines['right'].set_color('#00a3cc')
    ax.spines['bottom'].set_color('#00a3cc')
    ax.spines['left'].set_color('#00a3cc')

    for k, spine in ax.spines.items():  # ax.spines is a dictionary
        spine.set_zorder(35)

    # ax.spines['top'].set_visible(False)
    # ax.spines['right'].set_visible(False)
    # ax.spines['bottom'].set_visible(False)
    # ax.spines['left'].set_visible(False)

    # fig.tight_layout(pad=0.25)
    fig.tight_layout(rect=[0.01, 0.01, .99, .99])

    plt.show()

    if sidebar:
        basemap_name = 'basemap_sidebar.p'
    else:
        basemap_name = 'basemap.p'

    info = sm.calculate_gridcell_areas(info)

    # Save basemap
    save_basemap(m, info, path_to_basemap=path_to_map)
    # picklename = str(path_to_map / basemap_name)
    # pickle.dump(m, open(picklename,'wb'), -1)
    # print('!!! Pickle just made: ' + picklename)

    # ## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
    # ## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) +
    # ##             ' - Filter' + str(downLim) + '-' + str(upLim) + '.png')
    # plt.savefig('test.png')

    return m


def load_my_cmap(name):
    '''
    Creates and loads custom colormap
    '''
    # cdict = {'red': ((0.0, 0.0, 0.0),
    #                  (1.0, 0.7, 0.7)),
    #          'green': ((0.0, 0.25, 0.25),
    #                    (1.0, 0.85, 0.85)),
    #          'blue': ((0.0, 0.5, 0.5),
    #                   (1.0, 1.0, 1.0))}
    # my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    if name == 'my_cmap_lightblue':
        cdict = {'red': ((0.0, 0.0, 0.0),   # Dark
                         (1.0, 0.9, 0.9)),  # Light
                 'green': ((0.0, 0.9, 0.9),
                           (1.0, 1.0, 1.0)),
                 'blue': ((0.0, 0.9, 0.9),
                          (1.0, 1.0, 1.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_amber2red':
        # cdict = {'red': ((0.0, 1.0, 1.0),
        #                  (1.0, 0.5, 0.5)),
        #          'green': ((0.0, 1.0, 1.0),
        #                    (1.0, 0.0, 0.0)),
        #          'blue': ((0.0, 0.0, 0.0),
        #                   (1.0, 0.0, 0.0))}
        # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap', cdict, 256)
        cdict = {'red': ((0.0, 1.0, 1.0),
                         (1.0, 0.5, 0.5)),
                 'green': ((0.0, 0.85, 0.85),
                           (1.0, 0.0, 0.0)),
                 'blue': ((0.0, 0.3, 0.3),
                          (1.0, 0.0, 0.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_red2black':
        # c1 = np.array([252, 142, 110]) / 256  # RGB/256
        c1 = np.array([250, 59, 59]) / 256      # RGB/256
        c2 = np.array([103, 0, 13]) / 256       # RGB/256
        cdict = {'red': ((0.0, c1[0], c1[0]),
                         (1.0, c2[0], c2[0])),
                 'green': ((0.0, c1[1], c1[1]),
                           (1.0, c2[1], c2[1])),
                 'blue': ((0.0, c1[2], c1[2]),
                          (1.0, c2[2], c2[2]))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    else:
        print('cmap name does not match any of the available cmaps')
        my_cmap = None
    return my_cmap


def save_basemap(m, info, path_to_basemap='auto'):
    '''
    Saves basemap (and corresponding info.grid) to a pickle file

    Arguments:
        m (mpl_toolkits.basemap.Basemap): Basemap object
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory

    Returns:
        Pickle file

    See also:
        :mod:`pickle`
    '''
    # # basemap = [grid, m]
    # f = open(str(path_to_map / (info.grid.basemap + '.p')), 'w')
    # pickle.dump(grid, f)
    # pickle.dump(m, f)
    # f.close()
    # picklename = str(path_to_map / (info.grid.basemap + '.p'))
    # pickle.dump(basemap, open(picklename, 'wb'), -1)
    # print('!!! Pickle just made: ' + picklename)
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)

    # basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
    basemap_picklename = os.path.join(path_to_map, info.grid.basemap + '.basemap')
    pickle.dump(m, open(basemap_picklename, 'wb'), -1)

    # info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
    info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
    pickle.dump(info, open(info_picklename, 'wb'), -1)

    print('!!! Pickles were just made: ' + str(path_to_map))
If ``auto`` then output directory is ``info.dirs.pngs`` Returns: Basemap object", "orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080') # Change colorbar labels for easier interpreting label_values = cbar._tick_data_values", "ncv['latitude'][:] lons, lats = np.meshgrid(lon,lat) topo = ncv['topo'][:,:] # fig = plt.figure(figsize=(19,9)) #", "path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary') elif info.grid.type == 'generic': path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary')) else: path_to_map =", "'Ecosystem Management Branch\\n' + 'Fisheries and Oceans Canada – Maritimes Region\\n' + 'Bedford", "'#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.25, text4, style='italic', horizontalalignment='left', verticalalignment='top', size=8, color= '#808080', transform=plt.gca().transAxes) cbaxes2", ":mod:`pickle` ''' # # basemap = [grid, m] # f = open(str(path_to_map /", "''' Creates a map of \"pings\" (i.e. not gridded density) of only one", "``merged_grid.nc`` in the `\\merged` directory cmap (str): Colormap to use sidebar (bool): If", "for legend ''' import datetime alat = (md['maxlat'] - md['minlat'])/2 text1 = 'VESSEL", "(d.attrs['maxlat'] - d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed = 616.66 # m/min", "y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,2,marker='o',color='r', zorder=30) # fig = plt.figure() # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')", "str(path_to_map / (info.grid.basemap + '.basemap')) basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m, open(basemap_picklename, 'wb'),", "id:'+ str(ship)) # print(singleship['longitude'].values) # print(singleship['latitude'].values) x, y = m(singleship['longitude'].values,singleship['latitude'].values) # x, y", "If ``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory sidebar (bool): If", "== 'red2black': cmapcolor = load_my_cmap('my_cmap_red2black') else: cmapcolor =plt.get_cmap(cmap) cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10,", "Creates a map of \"pings\" (i.e. not gridded density) of only one ship", "is saved to hardrive ''' import pandas as pd print('Mapping...') # ----------------------------------------------------------------------------- d", "one ship Arguments: info (info): ``info`` object containing metadata Keyword Arguments: file_in (str):", "basemap save_basemap(m,info,path_to_basemap=path_to_map) # picklename = str(path_to_map / basemap_name) # pickle.dump(m,open(picklename,'wb'),-1) # print('!!! 
Pickle", "for ``merged_grid.nc`` in the `\\merged` directory Ship_No (str): Unique identifier of the ship", "name does not match any of the available cmaps') return my_cmap def save_basemap(m,info,path_to_basemap='auto'):", "for legend in left block of map :param info info: ``info`` object containing", "= 'basemap.p' info = sm.calculate_gridcell_areas(info) # Save basemap save_basemap(m,info,path_to_basemap=path_to_map) # picklename = str(path_to_map", "((d['longitude']> minlon) & (d['longitude']<= maxlon) & (d['latitude']> minlat) & (d['latitude']<= maxlat)) filtered_data =", "pickle.load(open(basemap_file,'rb')) indx = ((d['longitude']> minlon) & (d['longitude']<= maxlon) & (d['latitude']> minlat) & (d['latitude']<=", "((0.0, c1[0], c1[0]), (1.0, c2[0], c2[0])), 'green': ((0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])),", "exist, make it if not os.path.exists(basemap_file): m = sm.make_basemap(info,info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) else: print('Found basemap...') m", "' + info.sidebar.included_vessel_types + '\\n\\n' + 'Grid size: ' + str(md['bin_size']) + '", "0.0), # (1.0, 0.0, 0.0))} # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0,", "c1[0], c1[0]), (1.0, c2[0], c2[0])), 'green': ((0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])), 'blue':", "spatial[2] maxlon = spatial[3] # Create map m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat, llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution)", "cbaxes2, orientation='horizontal') # Change colorbar labels for easier interpreting label_values = cbar._tick_data_values log_label_values", "H = np.flipud(H) # Mask zeros d.attrs['mask_below'] = info.maps.mask_below Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H) #", "+ '.p')) # pickle.dump(basemap, open(picklename, 'wb'), -1) # print('!!! Pickle just made: '", "sblat = minlat + ((maxlat-minlat)/20) m.drawmapscale(sblon, sblat, minlon, minlat, info.maps.scalebar_km, barstyle='fancy', units='km', fontsize=8,", "file_in + '.png'`` filedir_out (str): Directory where figure is saved. 
def make_legend_text(info, md):
    '''
    Makes text for legend in left block of map

    :param info info: ``info`` object containing metadata
    :return: text for legend
    '''
    import datetime

    alat = (md['maxlat'] - md['minlat']) / 2

    text1 = 'VESSEL DENSITY HEATMAP'

    # --------------------------------------------------------
    text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
             'Data source: ' + md['data_source'] + '\n\n' +
             'Data source description:\n' + md['data_description'] + '\n\n' +
             'Time range: \n' + md['startdate'][0:-3] + ' to ' +
             md['enddate'][0:-3] + '\n\n' +
             'Included speeds: ' + info.sidebar.included_speeds + '\n' +
             'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
             'Grid size: ' + str(md['bin_size']) + ' degrees (~' +
             str(int(round(sm.degrees_to_meters(md['bin_size'], alat)))) + ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
             'Interpolation: ' + md['interpolation'] + '\n' +
             'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
             'Time bin: ' + str(round(md['time_bin'] * 1440, 1)) + ' minutes\n' +
             'Mask below: ' + str(md['mask_below']) + ' vessels per grid'
             )

    text3 = ('Creation date: ' +
             datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
             'Creation script: ' + info.run_name + '.py\n' +
             'Software: ship mapper v0.1\n\n' +
             'Created by:\n' +
             'Oceans and Coastal Management Division\n' +
             'Ecosystem Management Branch\n' +
             'Fisheries and Oceans Canada – Maritimes Region\n' +
             'Bedford Institute of Oceanography\n' +
             'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2'
             )

    text4 = ('---------------------------------------------------------------\n' +
             'WARNING: This is a preliminary data product.\n' +
             'We cannot guarantee the validity, accuracy, \n' +
             'or quality of this product. Data is provided\n' +
             'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
             '---------------------------------------------------------------\n'
             )

    return text1, text2, text3, text4


def map_dots(info, file_in, sidebar=False, save=True):
    '''
    Creates a map of "pings" rather than gridded density

    Arguments:
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the `\merged` directory
        sidebar (bool): If ``True``, includes side panel with metadata
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
    '''
    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if (info.grid.minlat == None or info.grid.maxlat == None or
            info.grid.minlon == None or info.grid.maxlon == None):
        minlat = d['lat'].values.min()
        maxlat = d['lat'].values.max()
        minlon = d['lon'].values.min()
        maxlon = d['lon'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')

    # basemap_file = str(path_to_basemap / 'basemap_spots.p')
    # NOTE: argument order normalized to match make_basemap(info, spatial, ...)
    m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])
    # if not os.path.exists(str(path_to_basemap / 'basemap.p')):
    #     m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])
    # else:
    #     print('Found basemap...')
    #     m = pickle.load(open(basemap_file, 'rb'))

    x, y = m(d['longitude'].values, d['latitude'].values)
    cs = m.scatter(x, y, s=0.1, marker='o', color='r', zorder=10)

    # plt.show()
    # # Save map as png
    # if save:
    #     filedir = str(info.dirs.pngs)
    #     sm.checkDir(filedir)
    #     filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
    #     plt.savefig(os.path.join(filedir, filename), dpi=300)
    return


def map_dots_one_ship(info, file_in, Ship_No, save=True):
    '''
    Creates a map of "pings" (i.e. not gridded density) of only one ship

    Arguments:
        info (info): ``info`` object containing metadata

    Keyword Arguments:
        file_in (str): Gridded or merged file to map. If ``None`` it looks for
            ``merged_grid.nc`` in the `\merged` directory
        Ship_No (int): Index of the ship to plot, i.e. its position within the
            list of unique ship ids found in the file
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
    '''
    import pandas as pd

    print('Mapping...')
    # -------------------------------------------------------------------------
    d = xr.open_dataset(file_in)

    # Define boundaries
    if (info.grid.minlat == None or info.grid.maxlat == None or
            info.grid.minlon == None or info.grid.maxlon == None):
        minlat = d['lat'].values.min()
        maxlat = d['lat'].values.max()
        minlon = d['lon'].values.min()
        maxlon = d['lon'].values.max()
    else:
        minlat = info.grid.minlat
        maxlat = info.grid.maxlat
        minlon = info.grid.minlon
        maxlon = info.grid.maxlon

    path_to_basemap = info.dirs.project_path / 'ancillary'
    print('-----------------------------------------------------')
    print('-----------------------------------------------------')

    # basemap_file = str(path_to_basemap / 'basemap_spots.p')
    m = sm.make_basemap(info, [minlat, maxlat, minlon, maxlon])

    # Keep only pings inside the map boundaries
    indx = ((d['longitude'] > minlon) & (d['longitude'] <= maxlon) &
            (d['latitude'] > minlat) & (d['latitude'] <= maxlat))
    filtered_data = d.sel(Dindex=indx)

    # Pick one ship by its position in the list of unique ids
    ship_id = info.ship_id
    unis = pd.unique(filtered_data[ship_id].values)
    ship = unis[Ship_No]

    indxship = (filtered_data[ship_id] == ship)
    singleship = filtered_data.sel(Dindex=indxship)

    print('Ship id: ' + str(ship))
    # print(singleship['longitude'].values)
    # print(singleship['latitude'].values)

    x, y = m(singleship['longitude'].values, singleship['latitude'].values)
    # x, y = m(d['longitude'].values, d['latitude'].values)
    cs = m.scatter(x, y, 2, marker='o', color='r', zorder=30)

    # fig = plt.figure()
    # plt.plot(filtered_data['longitude'].values,
    #          filtered_data['latitude'].values, '.')
    # plt.show()

    # # Save map as png
    # if save:
    #     filedir = str(info.dirs.pngs)
    #     sm.checkDir(filedir)
    #     filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
    #     plt.savefig(os.path.join(filedir, filename), dpi=300)
    return
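# ---------------------------------------------------------------------------
# Example usage (a sketch, not part of the original module): ``Ship_No`` is a
# positional index rather than an id string, so the call below plots the
# pings of the third unique ship found in the (hypothetical) merged file.
#
#   map_dots_one_ship(info, 'merged_grid.nc', Ship_No=2)
# ---------------------------------------------------------------------------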
def define_path_to_map(info, path_to_basemap='auto'):
    '''
    Figures out where the ``.basemap`` and ``.grid`` files are

    Arguments:
        info (info): ``info`` object containing metadata
    '''
    if path_to_basemap == 'auto':
        if info.grid.type == 'one-off':
            path_to_map = os.path.join(info.dirs.project_path,
                                       info.grid.region, 'ancillary')
        elif info.grid.type == 'generic':
            path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,
                                                       'ancillary'))
    else:
        path_to_map = path_to_basemap
    return path_to_map


def make_basemap(info, spatial, path_to_basemap='auto', sidebar=False):
    '''
    Makes a basemap

    Arguments:
        info (info): ``info`` object containing metadata
        spatial (list): List with corners... this will be deprecated soon

    Keyword arguments:
        path_to_basemap (str): Directory where to save the produced basemap.
            If ``'auto'`` then path is setup by
            :func:`~ship_mapper.mapper.define_path_to_map`
        sidebar (bool): If ``True`` space for a side panel is added to the
            basemap

    Returns:
        A ``.basemap`` and a ``.grid`` file; the Basemap object itself is
        returned by the function
    '''
    print('Making basemap...')
    # -------------------------------------------------------------------------
    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
    sm.checkDir(str(path_to_map))

    minlat = spatial[0]
    maxlat = spatial[1]
    minlon = spatial[2]
    maxlon = spatial[3]

    # Create map
    m = Basemap(projection='mill',
                llcrnrlat=minlat, urcrnrlat=maxlat,
                llcrnrlon=minlon, urcrnrlon=maxlon,
                resolution=info.maps.resolution)

    # TOPO
    # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
    # using the netCDF output option
    # bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
    bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
    if not os.path.isfile(bathymetry_file):
        isub = 1
        base_url = 'http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
        query = 'topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat, isub, minlat,
                                                      minlon, isub, maxlon)
        url = base_url + query
        # store data in NetCDF file
        urllib.request.urlretrieve(url, bathymetry_file)

    # open NetCDF data
    nc = netCDF4.Dataset(bathymetry_file)
    ncv = nc.variables
    lon = ncv['longitude'][:]
    lat = ncv['latitude'][:]
    lons, lats = np.meshgrid(lon, lat)
    topo = ncv['topo'][:, :]

    # fig = plt.figure(figsize=(19, 9))
    # ax = fig.add_axes([0.05, 0.05, 0.80, 1])
    # ax = fig.add_axes([0, 0, 0.80, 1])
    # ax = fig.add_axes([0.23, 0.035, 0.85, 0.9])
    # NOTE: the original figure-creation line could not be recovered from the
    # source; grabbing the current figure keeps the fig.* calls below working
    fig = plt.gcf()
    if sidebar:
        ax = plt.subplot2grid((1, 24), (0, 5), colspan=19)
    else:
        ax = fig.add_axes([0.05, 0.05, 0.94, 0.94])

    TOPOmasked = np.ma.masked_where(topo > 0, topo)

    cs = m.pcolormesh(lons, lats, TOPOmasked,
                      cmap=load_my_cmap('my_cmap_lightblue'),
                      latlon=True, zorder=5)

    # m.drawcoastlines(color='#A27D0C', linewidth=0.5, zorder=25)
    # m.fillcontinents(color='#E1E1A0', zorder=23)
    m.drawcoastlines(color='#a6a6a6', linewidth=0.5, zorder=25)
    m.fillcontinents(color='#e6e6e6', zorder=23)
    m.drawmapboundary()

    def setcolor(x, color):
        for m in x:
            for t in x[m][1]:
                t.set_color(color)

    parallels = np.arange(minlat, maxlat, info.maps.parallels)
    # labels = [left, right, top, bottom]
    par = m.drawparallels(parallels, labels=[True, False, False, False],
                          dashes=[20, 20], color='#00a3cc',
                          linewidth=0.2, zorder=25)
    setcolor(par, '#00a3cc')

    meridians = np.arange(minlon, maxlon, info.maps.meridians)
    mers = m.drawmeridians(meridians, labels=[False, False, False, True],
                           dashes=[20, 20], color='#00a3cc',
                           linewidth=0.2, zorder=25)
    setcolor(mers, '#00a3cc')

    ax = plt.gca()
    # ax.axhline(linewidth=4, color="#00a3cc")
    # ax.axvline(linewidth=4, color="#00a3cc")
    # ax.spines['top'].set_color('#00a3cc')
    ax.spines['right'].set_color('#00a3cc')
    ax.spines['bottom'].set_color('#00a3cc')
    ax.spines['left'].set_color('#00a3cc')
    for k, spine in ax.spines.items():  # ax.spines is a dictionary
        spine.set_zorder(35)

    # ax.spines['top'].set_visible(False)
    # ax.spines['right'].set_visible(False)
    # ax.spines['bottom'].set_visible(False)
    # ax.spines['left'].set_visible(False)

    # fig.tight_layout(pad=0.25)
    fig.tight_layout(rect=[0.01, 0.01, .99, .99])
    plt.show()

    if sidebar:
        basemap_name = 'basemap_sidebar.p'
    else:
        basemap_name = 'basemap.p'

    info = sm.calculate_gridcell_areas(info)

    # Save basemap
    save_basemap(m, info, path_to_basemap=path_to_map)
    # picklename = str(path_to_map / basemap_name)
    # pickle.dump(m, open(picklename, 'wb'), -1)
    # print('!!! Pickle just made: ' + picklename)

    # ## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
    # ## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) +
    # ##             ' - Filter' + str(downLim) + '-' + str(upLim) + '.png')
    # plt.savefig('test.png')
    return m
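# ---------------------------------------------------------------------------
# Example usage (a sketch, not part of the original module): corner order in
# ``spatial`` is [minlat, maxlat, minlon, maxlon]; the coordinates below are
# made up for illustration.
#
#   m = make_basemap(info, [42.0, 47.0, -68.0, -57.0], sidebar=True)
# ---------------------------------------------------------------------------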
def load_my_cmap(name):
    '''
    Creates and loads custom colormap
    '''
    # cdict = {'red': ((0.0, 0.0, 0.0),
    #                  (1.0, 0.7, 0.7)),
    #          'green': ((0.0, 0.25, 0.25),
    #                    (1.0, 0.85, 0.85)),
    #          'blue': ((0.0, 0.5, 0.5),
    #                   (1.0, 1.0, 1.0))}
    # my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    if name == 'my_cmap_lightblue':
        cdict = {'red': ((0.0, 0.0, 0.0),    # Dark
                         (1.0, 0.9, 0.9)),   # Light
                 'green': ((0.0, 0.9, 0.9),
                           (1.0, 1.0, 1.0)),
                 'blue': ((0.0, 0.9, 0.9),
                          (1.0, 1.0, 1.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_amber2red':
        # cdict = {'red': ((0.0, 1.0, 1.0),
        #                  (1.0, 0.5, 0.5)),
        #          'green': ((0.0, 1.0, 1.0),
        #                    (1.0, 0.0, 0.0)),
        #          'blue': ((0.0, 0.0, 0.0),
        #                   (1.0, 0.0, 0.0))}
        # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap', cdict, 256)
        cdict = {'red': ((0.0, 1.0, 1.0),
                         (1.0, 0.5, 0.5)),
                 'green': ((0.0, 0.85, 0.85),
                           (1.0, 0.0, 0.0)),
                 'blue': ((0.0, 0.3, 0.3),
                          (1.0, 0.0, 0.0))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    elif name == 'my_cmap_red2black':
        # c1 = np.array([252, 142, 110]) / 256  # RGB/256
        c1 = np.array([250, 59, 59]) / 256  # RGB/256
        c2 = np.array([103, 0, 13]) / 256   # RGB/256
        cdict = {'red': ((0.0, c1[0], c1[0]),
                         (1.0, c2[0], c2[0])),
                 'green': ((0.0, c1[1], c1[1]),
                           (1.0, c2[1], c2[1])),
                 'blue': ((0.0, c1[2], c1[2]),
                          (1.0, c2[2], c2[2]))}
        my_cmap = LinearSegmentedColormap('my_colormap', cdict, 256)
    else:
        print('cmap name does not match any of the available cmaps')
        my_cmap = None  # avoid a NameError on the return below
    return my_cmap


def save_basemap(m, info, path_to_basemap='auto'):
    '''
    Saves basemap (and corresponding ``info.grid``) to a pickle file

    Arguments:
        m (mpl_toolkits.basemap.Basemap): Basemap object
        info (info): ``info`` object containing metadata

    Keyword arguments:
        path_to_basemap (str): Directory where to save the produced basemap.
            If ``'auto'`` it looks in ``grids`` directory

    Returns:
        Pickle file

    See also:
        :mod:`pickle`
    '''
    # # basemap = [grid, m]
    # f = open(str(path_to_map / (info.grid.basemap + '.p')), 'w')
    # pickle.dump(grid, f)
    # pickle.dump(m, f)
    # f.close()
    # picklename = str(path_to_map / (info.grid.basemap + '.p'))
    # pickle.dump(basemap, open(picklename, 'wb'), -1)
    # print('!!! Pickle just made: ' + picklename)

    path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)

    # basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
    basemap_picklename = os.path.join(path_to_map, info.grid.basemap + '.basemap')
    pickle.dump(m, open(basemap_picklename, 'wb'), -1)

    # info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
    info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
    pickle.dump(info, open(info_picklename, 'wb'), -1)
    return
Pickle just made: ' + picklename)", "+ str(BinNo) + ' - Filter' +str(downLim) + '-' + str(upLim) + '.png')", "loads custom colormap ''' # cdict = {'red': ((0.0, 0.0, 0.0), # (1.0,", "grid' ) text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n' +", "= str(path_to_basemap / 'basemap_spots.p') m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # if not os.path.exists(str(path_to_basemap / 'basemap.p')):", "Oceanography\\n' + 'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2' ) text4 =", "spine.set_zorder(35) # ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show()", "1.0, 1.0))} # my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) if name == 'my_cmap_lightblue': cdict = {'red':", "it looks for ``merged_grid.nc`` in the `\\merged` directory Ship_No (str): Unique identifier of", "map as png # if save: # filedir = str(info.dirs.pngs) # sm.checkDir(filedir) #", "plt.show() # # Save map as png # if save: # filedir =", "``info.run_name + '__' + file_in + '.png'`` filedir_out (str): Directory where figure is", "= (np.median(Hmasked)) - (4*Hmasked.std()) alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2 cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat)", "'Created by:\\n' + 'Oceans and Coastal Management Division\\n' + 'Ecosystem Management Branch\\n' +", "verticalalignment='top', size=9, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.145, text3, horizontalalignment='left', verticalalignment='top', size=7, color= '#808080',", "a map of \"pings\" (i.e. not gridded density) of only one ship Arguments:", "Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat, llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution) # TOPO # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using", "arguments: path_to_basemap (str): Directory where to save the produced basemap. If ``'auto'`` then", "files Arguments: info (info): ``info`` object containing metadata ''' if path_to_basemap == 'auto':", "+ datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n' + 'Creation script: ' + info.run_name + '.py\\n'", "y = m(d['longitude'].values,d['latitude'].values) cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10) # plt.show() # # Save map", "meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax = plt.gca() #", "# c1 = np.array([252,142,110])/256 #RGB/256 c1 = np.array([250,59,59])/256 #RGB/256 c2 = np.array([103,0,13])/256 #RGB/256", "= np.log10(Hmasked) if vmin != None: vmin = np.log10(vmin) if vmax != None:", "``info`` object containing metadata spatial (list): List with corners... this will be deprecated", "plt.close() return def make_legend_text(info,md): ''' Makes text for legend in left block of", "= m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax) #scalebar sblon = minlon + ((maxlon-minlon)/10) sblat", "def map_density(info, file_in=None, cmap='Default', sidebar=False, to_screen=True, save=True, filename_out='auto',filedir_out='auto'): ''' Plots a map using", "added to the basemap Returns: A ``.basemap`` and a ``.grid`` files ''' print('Making", "# TODO: maybe delete this? 
# mng = plt.get_current_fig_manager() # mng.frame.Maximize(True) # #", "my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name == 'my_cmap_red2black': # c1 = np.array([252,142,110])/256 #RGB/256 c1", "Make colormap fig = plt.gcf() ax = plt.gca() if cmap == 'Default': cmapcolor", "fig.tight_layout() plt.show() # Save map as png if save: if filedir_out == 'auto':", "== None or info.grid.minlon == None or info.grid.maxlon == None: minlat = d['lat'].values.min()", "ax.spines['top'].set_visible(False) # ax.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show() if sidebar:", "m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon]) # if not os.path.exists(str(path_to_basemap / 'basemap.p')): # m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])", "text2, horizontalalignment='left', verticalalignment='top', size=9, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.145, text3, horizontalalignment='left', verticalalignment='top', size=7,", "plt.savefig(os.path.join(filedir,filename), dpi=300) return def map_dots_one_ship(info, file_in, Ship_No, save=True): ''' Creates a map of", "0.0))} # my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0, 1.0, 1.0), (1.0, 0.5,", "((0.0, 0.25, 0.25), # (1.0, 0.85, 0.85)), # 'blue': ((0.0, 0.5, 0.5), #", "and flip H... ---------------------------------------------------------------------------- H = np.rot90(H) H = np.flipud(H) # Mask zeros", "text2, text3, text4 = make_legend_text(info,d.attrs) ax2 = plt.subplot2grid((1,24),(0,0),colspan=4) # Turn off tick labels", "0.9)), # Light 'green': ((0.0, 0.9, 0.9), (1.0, 1.0,1.0)), 'blue': ((0.0, 0.9, 0.9),", "with metadata save (bool): If ``True`` a ``.png`` figure is saved to hardrive", "as np # Suppress matplotlib warnings np.warnings.filterwarnings('ignore') import xarray as xr import cmocean", "# labels = [left,right,top,bottom] par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians)", "vmax = info.maps.cbarmax else: vmax = None if info.maps.cbarmin == 'auto': # vmin", "else: print('cmap name does not match any of the available cmaps') return my_cmap", "d.attrs['maxlon'] basemap_file = info.dirs.basemap print('Basemap file: ' + basemap_file) # Check for basemap.p", "it looks for ``merged_grid.nc`` in the `\\merged` directory cmap (str): Colormap to use", "= d['lat'].values.min() maxlat = d['lat'].values.max() minlon = d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat", "H for better display Hmasked = np.log10(Hmasked) if vmin != None: vmin =", "# ax.spines['bottom'].set_visible(False) # ax.spines['left'].set_visible(False) # fig.tight_layout(pad=0.25) fig.tight_layout(rect=[0.01,0.01,.99,.99]) plt.show() if sidebar: basemap_name = 'basemap_sidebar.p'", "+ info.sidebar.included_vessel_types + '\\n\\n' + 'Grid size: ' + str(md['bin_size']) + ' degrees", "cs = m.scatter(x,y,2,marker='o',color='r', zorder=30) # fig = plt.figure() # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.') # plt.show() #", "= str(path_to_map / (info.grid.basemap + '.basemap')) basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m, open(basemap_picklename,", "+ 'We cannot 
​guarantee the validity, accuracy, \\n' + 'or quality of this", "side panel with metadata save (bool): If ``True`` a ``.png`` figure is saved", "= np.log10(vmin) if vmax != None: vmax = np.log10(vmax) # Make colormap fig", "minlat, info.maps.scalebar_km, barstyle='fancy', units='km', fontsize=8, fontcolor='#808080', fillcolor1 = '#cccccc', fillcolor2 = '#a6a6a6', yoffset", "** label_values,decimals=0) labels = [] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units']) if", "``True`` space for a side panel is added to the basemap Returns: A", "dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out where is the .basemap and", "TOPO # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using the netCDF output option #", "d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat = info.grid.minlat maxlat = info.grid.maxlat minlon =", "my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256) cdict = {'red': ((0.0, 1.0, 1.0), (1.0, 0.5, 0.5)), 'green':", "vmin print('Min: ' + str(np.min(Hmasked))) print('Max: ' + str(np.max(Hmasked))) print('Mean: ' + str(np.nanmean(Hmasked)))", "616.66 # m/min ...roughly 20 knots max_speed = 316.66 # m/min ...roughly 20", "filedir_out (str): Directory where figure is saved. If ``auto`` then output directory is", "cannot ​guarantee the validity, accuracy, \\n' + 'or quality of this product. ​Data", "Basemap object ''' print('map_density ------------------------------------------------------') # Load data if file_in == None: file_in", "# (1.0, 0.0, 0.0)), # 'blue': ((0.0, 0.0, 0.0), # (1.0, 0.0, 0.0))}", "np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax = plt.gca() # ax.axhline(linewidth=4, color=\"#00a3cc\")", "Pickle just made: ' + picklename) # ## pngDir = 'C:\\\\Users\\\\IbarraD\\\\Documents\\\\VMS\\\\png\\\\' ## plt.savefig(datadir[0:-5]", "= {'red': ((0.0, c1[0], c1[0]), (1.0, c2[0], c2[0])), 'green': ((0.0, c1[1], c1[1]), (1.0,", "lons, lats = np.meshgrid(lon,lat) topo = ncv['topo'][:,:] # fig = plt.figure(figsize=(19,9)) # ax", "== ship) singleship = filtered_data.sel(Dindex=indxship) print('Ship id:'+ str(ship)) # print(singleship['longitude'].values) # print(singleship['latitude'].values) x,", "+ '_' + str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def map_dots_one_ship(info, file_in,", "for better display Hmasked = np.log10(Hmasked) if vmin != None: vmin = np.log10(vmin)", "to hardrive filename_out (str): Name of produced figure. 
If ``auto`` then name is", "+ picklename) path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) # basemap_picklename = str(path_to_map / (info.grid.basemap +", "path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory Returns: Pickle file See", "transform=plt.gca().transAxes) plt.text(0.02, 0.83, text2, horizontalalignment='left', verticalalignment='top', size=9, color= '#808080', transform=plt.gca().transAxes) plt.text(0.02, 0.145, text3,", "then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool): If ``True`` space for a", "is added to the basemap Returns: A ``.basemap`` and a ``.grid`` files '''", "(1.0, 1.0, 1.0))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name == 'my_cmap_amber2red': # cdict =", "soon Keyword arguments: path_to_basemap (str): Directory where to save the produced basemap. If", "elif info.maps.cbarmin != None: vmin = info.maps.cbarmin else: vmin = None # Log", "# if save: # filedir = str(info.dirs.pngs) # sm.checkDir(filedir) # filename = info.project_name", "'#a6a6a6', yoffset = (0.01*(m.ymax-m.ymin)), labelstyle='simple',zorder=60) if not sidebar: cbaxes2 = fig.add_axes([0.70, 0.18, 0.2,", "ax.spines['right'].set_color('#00a3cc') ax.spines['bottom'].set_color('#00a3cc') ax.spines['left'].set_color('#00a3cc') for k, spine in ax.spines.items(): #ax.spines is a dictionary spine.set_zorder(35)", "' + str(md['bin_size']) + ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\\n' +", "data product.\\n' + 'We cannot ​guarantee the validity, accuracy, \\n' + 'or quality", "​guarantee the validity, accuracy, \\n' + 'or quality of this product. ​Data is", "gridded density) of only one ship Arguments: info (info): ``info`` object containing metadata", "LinearSegmentedColormap('my_colormap',cdict,256) if name == 'my_cmap_lightblue': cdict = {'red': ((0.0, 0.0, 0.0), # Dark", "' minutes\\n' + 'Mask below: ' + str(md['mask_below']) + ' vessels per grid'", "filename = filename_out sm.checkDir(filedir) plt.savefig(os.path.join(filedir,filename), dpi=300) # Close netCDF file d.close() if to_screen", "& (d['latitude']> minlat) & (d['latitude']<= maxlat)) filtered_data = d.sel(Dindex=indx) ship_id = info.ship_id unis", "%H:%M:%S') + '\\n' + 'Creation script: ' + info.run_name + '.py\\n' + 'Software:", "plt.figure(figsize=(19,9)) # ax = fig.add_axes([0.05,0.05,0.80,1]) # ax = fig.add_axes([0,0,0.80,1]) # ax = fig.add_axes([0.23,0.035,0.85,0.9])", "+ '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out where", "' + info.run_name + '.py\\n' + 'Software: ship mapper v0.1\\n\\n' + 'Created by:\\n'", "output directory is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density ------------------------------------------------------') # Load data", "log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') # TODO: maybe delete this?", "name == 'my_cmap_lightblue': cdict = {'red': ((0.0, 0.0, 0.0), # Dark (1.0, 0.9,", "Arguments: path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory Returns: Pickle file", "cbaxes2, orientation='horizontal') cbar.ax.tick_params(labelsize=8, labelcolor='#808080') # Change colorbar labels for easier interpreting label_values =", "a pickle file Arguments: m (mpl_toolkits.basemap.Basemap): Basemap object info (info): ``info`` object 
containing", "'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2' ) text4 = ('---------------------------------------------------------------\\n' +", "as pd print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in) # Define boundaries if info.grid.minlat", "to_screen=True, save=True, filename_out='auto',filedir_out='auto'): ''' Plots a map using a gridded (or merged) file", "panel is added to the basemap Returns: A ``.basemap`` and a ``.grid`` files", "'auto': # vmin = (np.median(Hmasked)) - (4*Hmasked.std()) alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2 cellsize", "'one-off': path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary') elif info.grid.type == 'generic': path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary')) else: path_to_map", "colormap ''' # cdict = {'red': ((0.0, 0.0, 0.0), # (1.0, 0.7, 0.7)),", "+ 'Time bin: ' + str(round(md['time_bin']*1440,1)) + ' minutes\\n' + 'Mask below: '", "# picklename = str(path_to_map / basemap_name) # pickle.dump(m,open(picklename,'wb'),-1) # print('!!! Pickle just made:", "& (d['latitude']<= maxlat)) filtered_data = d.sel(Dindex=indx) ship_id = info.ship_id unis = pd.unique(filtered_data[ship_id].values) ship", "d.close() if to_screen == False: plt.close() return def make_legend_text(info,md): ''' Makes text for", "+ 'Bedford Institute of Oceanography\\n' + 'PO Box 1006, Dartmouth, NS, Canada, B2Y", "log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units']) if sidebar: text1, text2, text3, text4 =", "''' # cdict = {'red': ((0.0, 0.0, 0.0), # (1.0, 0.7, 0.7)), #", "d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat = d.attrs['minlat'] maxlat = d.attrs['maxlat'] minlon =", "str(info.dirs.pngs) else: filedir = filedir_out if filename_out == 'auto': filename = info.run_name +", "density) of only one ship Arguments: info (info): ``info`` object containing metadata Keyword", "# pickle.dump(basemap, open(picklename, 'wb'), -1) # print('!!! 
Pickle just made: ' + picklename)", "v0.1\\n\\n' + 'Created by:\\n' + 'Oceans and Coastal Management Division\\n' + 'Ecosystem Management", "= os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m, open(basemap_picklename, 'wb'), -1) # info_picklename = str(path_to_map /", "``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory sidebar (bool): If ``True``,", "to plot save (bool): If ``True`` a ``.png`` figure is saved to hardrive", "setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax = plt.gca()", "alpha=1.0, edgecolor='#a6a6a6', transform=plt.gca().transAxes)) plt.text(0.15, 0.99, text1, verticalalignment='top', horizontalalignment='left', weight='bold', size=10, color= '#737373', transform=plt.gca().transAxes)", "from matplotlib.patches import FancyBboxPatch from matplotlib.colors import LinearSegmentedColormap from mpl_toolkits.basemap import Basemap import", "= np.ma.masked_where(H<=d.attrs['mask_below'],H) # Set vman and vmin print('Min: ' + str(np.min(Hmasked))) print('Max: '", "return text1, text2, text3, text4 def map_dots(info, file_in, sidebar=False, save=True): ''' Creates a", "np.log10(vmax) # Make colormap fig = plt.gcf() ax = plt.gca() if cmap ==", "c2[0])), 'green': ((0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])), 'blue': ((0.0, c1[2], c1[2]), (1.0,", "(str): Name of produced figure. If ``auto`` then name is ``info.run_name + '__'", "correspoding info.grid) to a pickle file Arguments: m (mpl_toolkits.basemap.Basemap): Basemap object info (info):", "sm.degrees_to_meters(d.attrs['bin_size'], alat) # max_speed = 616.66 # m/min ...roughly 20 knots max_speed =", "zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(mers,'#00a3cc') ax =", "+ ((maxlon-minlon)/10) sblat = minlat + ((maxlat-minlat)/20) m.drawmapscale(sblon, sblat, minlon, minlat, info.maps.scalebar_km, barstyle='fancy',", "validity, accuracy, \\n' + 'or quality of this product. 
​Data is provided\\n' +", "bin: ' + str(round(md['time_bin']*1440,1)) + ' minutes\\n' + 'Mask below: ' + str(md['mask_below'])", "is a preliminary data product.\\n' + 'We cannot ​guarantee the validity, accuracy, \\n'", "'wb'), -1) # info_picklename = str(path_to_map / (info.grid.basemap + '.grid')) info_picklename = os.path.join(path_to_map,", "= ('Unit description: ' + md['unit_description'] + '\\n\\n' + 'Data source: ' +", "If ``'auto'`` then path is setup by :func:`~ship_mapper.mapper.define_path_to_map` sidebar (bool): If ``True`` space", "# Make colormap fig = plt.gcf() ax = plt.gca() if cmap == 'Default':", "per grid' ) text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n'", "sidebar (bool): If ``True``, includes side panel with metadata to_screen (bool): If ``True``,", "info.project_name + '_' + str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info,", "# TOPO # Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html # using the netCDF output option", "str(path_to_map / (info.grid.basemap + '.grid')) info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid') pickle.dump(info, open(info_picklename,", "a map using a gridded (or merged) file Arguments: info (info): ``info`` object", "x: for t in x[m][1]: t.set_color(color) parallels = np.arange(minlat,maxlat,info.maps.parallels) # labels = [left,right,top,bottom]", "# Log H for better display Hmasked = np.log10(Hmasked) if vmin != None:", "If ``auto`` then output directory is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density ------------------------------------------------------')", "map as png if save: if filedir_out == 'auto': filedir = str(info.dirs.pngs) else:", "fig = plt.figure() # plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.') # plt.show() # # Save map as png", "0.25, text4, style='italic', horizontalalignment='left', verticalalignment='top', size=8, color= '#808080', transform=plt.gca().transAxes) cbaxes2 = fig.add_axes([0.019, 0.9,", "``info`` object containing metadata Keyword Arguments: path_to_basemap (str): If ``'auto'`` it looks in", "# (1.0, 0.85, 0.85)), # 'blue': ((0.0, 0.5, 0.5), # (1.0, 1.0, 1.0))}", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n' + 'Creation script: ' + info.run_name + '.py\\n' +", "``True`` a ``.png`` figure is saved to hardrive ''' print('Mapping...') # ----------------------------------------------------------------------------- d", "= sm.make_basemap(info,[minlat,maxlat,minlon,maxlon]) else: print('Found basemap...') m = pickle.load(open(basemap_file,'rb')) x, y = m(d['longitude'].values,d['latitude'].values) cs", "= define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat = spatial[0] maxlat = spatial[1] minlon = spatial[2]", "/ 'usgsCeSrtm30v6.nc') bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc') if not os.path.isfile(bathymetry_file): isub = 1 base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'", "m = sm.make_basemap(info,[minlat,maxlat,minlon,maxlon]) else: print('Found basemap...') m = pickle.load(open(basemap_file,'rb')) x, y = m(d['longitude'].values,d['latitude'].values)", "labels.append(str(int(log_label_value))) cbar.ax.set_yticklabels(labels) cbar.ax.set_xlabel(d.attrs['units']) if sidebar: text1, text2, text3, text4 = make_legend_text(info,d.attrs) ax2 =", "basemap...') # m = 
pickle.load(open(basemap_file,'rb')) indx = ((d['longitude']> minlon) & (d['longitude']<= maxlon) &", "text2, text3, text4 def map_dots(info, file_in, sidebar=False, save=True): ''' Creates a map of", "\\n' + 'or quality of this product. ​Data is provided\\n' + 'on an", "if not sidebar: cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60) cbar = plt.colorbar(extend='both', cax", "np.log10(Hmasked) if vmin != None: vmin = np.log10(vmin) if vmax != None: vmax", "m/min ...roughly 20 knots vmin = cellsize / max_speed elif info.maps.cbarmin != None:", "d['lat'].values.max() minlon = d['lon'].values.min() maxlon = d['lon'].values.max() else: minlat = d.attrs['minlat'] maxlat =", "# print(singleship['longitude'].values) # print(singleship['latitude'].values) x, y = m(singleship['longitude'].values,singleship['latitude'].values) # x, y = m(d['longitude'].values,d['latitude'].values)", "+ 'Time range: \\n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] +", "ship = unis[Ship_No] indxship = (filtered_data[ship_id] == ship) singleship = filtered_data.sel(Dindex=indxship) print('Ship id:'+", "print('Mapping...') # ----------------------------------------------------------------------------- d = xr.open_dataset(file_in) # Define boundaries if info.grid.minlat == None", "m.fillcontinents(color='#E1E1A0',zorder=23) m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25) m.fillcontinents(color='#e6e6e6',zorder=23) m.drawmapboundary() def setcolor(x, color): for m in x: for t", "str(info.grid.bin_number) + '.png' # plt.savefig(os.path.join(filedir,filename), dpi=300) return def map_dots_one_ship(info, file_in, Ship_No, save=True): '''", "ncv['topo'][:,:] # fig = plt.figure(figsize=(19,9)) # ax = fig.add_axes([0.05,0.05,0.80,1]) # ax = fig.add_axes([0,0,0.80,1])", "np.meshgrid(lon,lat) topo = ncv['topo'][:,:] # fig = plt.figure(figsize=(19,9)) # ax = fig.add_axes([0.05,0.05,0.80,1]) #", "labels = [] for log_label_value in log_label_values: labels.append(str(int(log_label_value))) cbar.ax.set_xticklabels(labels) cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080') #", "((0.0, 0.9, 0.9), (1.0, 1.0, 1.0))} my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) elif name == 'my_cmap_amber2red':", "map. 
If ``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory sidebar (bool):", "+ md['enddate'][0:-3] + '\\n\\n' + 'Included speeds: ' + info.sidebar.included_speeds + '\\n' +", "# store data in NetCDF file urllib.request.urlretrieve(url, bathymetry_file) # open NetCDF data in", "par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25) setcolor(par,'#00a3cc') meridians = np.arange(minlon,maxlon,info.maps.meridians) mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2,", "+ '.py\\n' + 'Software: ship mapper v0.1\\n\\n' + 'Created by:\\n' + 'Oceans and", "Create grid for mapping lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values) xx,yy = m(lons_grid, lats_grid) H", "saved to hardrive ''' import pandas as pd print('Mapping...') # ----------------------------------------------------------------------------- d =", "Dartmouth, NS, Canada, B2Y 4A2' ) text4 = ('---------------------------------------------------------------\\n' + 'WARNING: This is", "# plt.savefig(os.path.join(filedir,filename), dpi=300) return def define_path_to_map(info, path_to_basemap='auto'): ''' Figures out where is the", "None or info.grid.maxlon == None: minlat = d['lat'].values.min() maxlat = d['lat'].values.max() minlon =", "sidebar: ax = plt.subplot2grid((1,24),(0,5),colspan=19) else: ax = fig.add_axes([0.05,0.05,0.94,0.94]) TOPOmasked = np.ma.masked_where(topo>0,topo) cs =", "map. If ``None`` it looks for ``merged_grid.nc`` in the `\\merged` directory cmap (str):", "(str): Directory where figure is saved. If ``auto`` then output directory is ``info.dirs.pngs``", "saved. If ``auto`` then output directory is ``info.dirs.pngs`` Returns: Basemap object ''' print('map_density", "as png if save: if filedir_out == 'auto': filedir = str(info.dirs.pngs) else: filedir", "' + picklename) path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) # basemap_picklename = str(path_to_map / (info.grid.basemap", "Creates and loads custom colormap ''' # cdict = {'red': ((0.0, 0.0, 0.0),", "= load_my_cmap('my_cmap_amber2red') elif cmap == 'red2black': cmapcolor = load_my_cmap('my_cmap_red2black') else: cmapcolor =plt.get_cmap(cmap) cs", "ax = plt.subplot2grid((1,24),(0,5),colspan=19) else: ax = fig.add_axes([0.05,0.05,0.94,0.94]) TOPOmasked = np.ma.masked_where(topo>0,topo) cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5)", "''' print('Making basemap...') # ----------------------------------------------------------------------------- path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap) sm.checkDir(str(path_to_map)) minlat = spatial[0]", "basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap')) basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap') pickle.dump(m,", "cmap == 'Default': cmapcolor = load_my_cmap('my_cmap_amber2red') elif cmap == 'red2black': cmapcolor = load_my_cmap('my_cmap_red2black')", "'C:\\\\Users\\\\IbarraD\\\\Documents\\\\VMS\\\\png\\\\' ## plt.savefig(datadir[0:-5] + 'png\\\\' + filename + '- Grid' + str(BinNo) +", "import urllib.request import netCDF4 def map_density(info, file_in=None, cmap='Default', sidebar=False, to_screen=True, save=True, filename_out='auto',filedir_out='auto'): '''" ]
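
# --- Editor's sketch (not part of the original ship_mapper source). It
# illustrates the two mechanisms load_my_cmap() and map_density() rely on:
# each channel in a cdict is a list of (position, value_left, value_right)
# anchors that LinearSegmentedColormap interpolates linearly, and densities
# are masked then log10-scaled before plotting. The colormap values, the
# toy field and the output filename below are all placeholders.
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

cdict_demo = {'red':   ((0.0, 1.0, 1.0), (1.0, 0.5, 0.5)),
              'green': ((0.0, 0.9, 0.9), (1.0, 0.0, 0.0)),
              'blue':  ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}
demo_cmap = LinearSegmentedColormap('demo_colormap', cdict_demo, 256)

H_demo = np.random.rand(20, 20) * 100
H_demo = np.ma.masked_where(H_demo < 10, H_demo)   # mask low-density cells
plt.pcolor(np.log10(H_demo), cmap=demo_cmap)       # mirrors np.log10(Hmasked)
plt.colorbar(extend='both')
plt.savefig('demo_density.png', dpi=300)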
[ "from .init_cfg import init from .root import root # add sub-command functions here", ".init_cfg import init from .root import root # add sub-command functions here root.add_command(init)", "<gh_stars>0 from .init_cfg import init from .root import root # add sub-command functions" ]
[ "accessed for the first time. Example: Use the ``@lazy`` decorator for a function", "value \"\"\" def __init__(self, func): self.func = func def __get__(self, instance, owner): if", "is accessed for the first time. Example: Use the ``@lazy`` decorator for a", "Property for the lazy evaluation of Python attributes. In the example below, the", "Example: Use the ``@lazy`` decorator for a function that returns the result of", "normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return value \"\"\" def __init__(self,", "only called when the attribute ``object.my_attribute`` is accessed for the first time. Example:", "the attribute ``object.my_attribute`` is accessed for the first time. Example: Use the ``@lazy``", "``@lazy`` decorator for a function that returns the result of the computation. Access", "example below, the ``expensive_computation()`` is only called when the attribute ``object.my_attribute`` is accessed", "Use the ``@lazy`` decorator for a function that returns the result of the", "owner): if instance is None: return None value = self.func(instance) setattr(instance, self.func.__name__, value)", "it as a normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return value", "the lazy evaluation of Python attributes. In the example below, the ``expensive_computation()`` is", "``expensive_computation()`` is only called when the attribute ``object.my_attribute`` is accessed for the first", "\"\"\" def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance", "instance, owner): if instance is None: return None value = self.func(instance) setattr(instance, self.func.__name__,", "result of the computation. Access it as a normal attribute:: @lazy def my_attribute(self):", "function that returns the result of the computation. Access it as a normal", "of Python attributes. In the example below, the ``expensive_computation()`` is only called when", "``object.my_attribute`` is accessed for the first time. Example: Use the ``@lazy`` decorator for", "@lazy def my_attribute(self): value = self.expensive_computation() return value \"\"\" def __init__(self, func): self.func", "Access it as a normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return", "a normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return value \"\"\" def", "my_attribute(self): value = self.expensive_computation() return value \"\"\" def __init__(self, func): self.func = func", "def __get__(self, instance, owner): if instance is None: return None value = self.func(instance)", "for the first time. Example: Use the ``@lazy`` decorator for a function that", "decorator for a function that returns the result of the computation. Access it", "the first time. Example: Use the ``@lazy`` decorator for a function that returns", "__init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None:", "= func def __get__(self, instance, owner): if instance is None: return None value", "for a function that returns the result of the computation. Access it as", "the ``@lazy`` decorator for a function that returns the result of the computation.", "computation. 
Access it as a normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation()", "__get__(self, instance, owner): if instance is None: return None value = self.func(instance) setattr(instance,", "return value \"\"\" def __init__(self, func): self.func = func def __get__(self, instance, owner):", "lazy evaluation of Python attributes. In the example below, the ``expensive_computation()`` is only", "if instance is None: return None value = self.func(instance) setattr(instance, self.func.__name__, value) return", "the example below, the ``expensive_computation()`` is only called when the attribute ``object.my_attribute`` is", "attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return value \"\"\" def __init__(self, func):", "for the lazy evaluation of Python attributes. In the example below, the ``expensive_computation()``", "called when the attribute ``object.my_attribute`` is accessed for the first time. Example: Use", "instance is None: return None value = self.func(instance) setattr(instance, self.func.__name__, value) return value", "evaluation of Python attributes. In the example below, the ``expensive_computation()`` is only called", "value = self.expensive_computation() return value \"\"\" def __init__(self, func): self.func = func def", "self.expensive_computation() return value \"\"\" def __init__(self, func): self.func = func def __get__(self, instance,", "= self.expensive_computation() return value \"\"\" def __init__(self, func): self.func = func def __get__(self,", "the ``expensive_computation()`` is only called when the attribute ``object.my_attribute`` is accessed for the", "the computation. Access it as a normal attribute:: @lazy def my_attribute(self): value =", "attribute ``object.my_attribute`` is accessed for the first time. Example: Use the ``@lazy`` decorator", "def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is", "below, the ``expensive_computation()`` is only called when the attribute ``object.my_attribute`` is accessed for", "Python attributes. In the example below, the ``expensive_computation()`` is only called when the", "the result of the computation. Access it as a normal attribute:: @lazy def", "\"\"\" Property for the lazy evaluation of Python attributes. In the example below,", "self.func = func def __get__(self, instance, owner): if instance is None: return None", "func def __get__(self, instance, owner): if instance is None: return None value =", "def my_attribute(self): value = self.expensive_computation() return value \"\"\" def __init__(self, func): self.func =", "func): self.func = func def __get__(self, instance, owner): if instance is None: return", "a function that returns the result of the computation. Access it as a", "time. Example: Use the ``@lazy`` decorator for a function that returns the result", "lazy(object): \"\"\" Property for the lazy evaluation of Python attributes. In the example", "as a normal attribute:: @lazy def my_attribute(self): value = self.expensive_computation() return value \"\"\"", "when the attribute ``object.my_attribute`` is accessed for the first time. Example: Use the", "returns the result of the computation. Access it as a normal attribute:: @lazy", "that returns the result of the computation. Access it as a normal attribute::", "of the computation. Access it as a normal attribute:: @lazy def my_attribute(self): value", "first time. 
Example: Use the ``@lazy`` decorator for a function that returns the", "class lazy(object): \"\"\" Property for the lazy evaluation of Python attributes. In the", "In the example below, the ``expensive_computation()`` is only called when the attribute ``object.my_attribute``", "attributes. In the example below, the ``expensive_computation()`` is only called when the attribute", "is only called when the attribute ``object.my_attribute`` is accessed for the first time.", "<reponame>DollSimon/tweezers class lazy(object): \"\"\" Property for the lazy evaluation of Python attributes. In" ]
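
# --- Editor's sketch: using the lazy descriptor above. Because lazy defines
# only __get__ (a non-data descriptor), the setattr() call shadows it with
# the computed value on first access, so later reads are plain attribute
# lookups with no recomputation. Spectrum is an illustrative class. ---
class Spectrum:
    @lazy
    def baseline(self):
        print('computing baseline...')   # printed only once
        return sum(range(1000)) / 1000.0

s = Spectrum()
print(s.baseline)   # computing baseline... then 499.5
print(s.baseline)   # 499.5 (cached)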
[ "dummy class which can be used to simply do no repair. \"\"\" def", "This class is allows to repair individuals after crossover if necessary. \"\"\" def", "\"\"\" This class is allows to repair individuals after crossover if necessary. \"\"\"", "repair individuals after crossover if necessary. \"\"\" def do(self, problem, pop, **kwargs): return", "def do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem,", "@abstractmethod def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy class", "class which can be used to simply do no repair. \"\"\" def do(self,", "class Repair: \"\"\" This class is allows to repair individuals after crossover if", "A dummy class which can be used to simply do no repair. \"\"\"", "individuals after crossover if necessary. \"\"\" def do(self, problem, pop, **kwargs): return self._do(problem,", "is allows to repair individuals after crossover if necessary. \"\"\" def do(self, problem,", "to simply do no repair. \"\"\" def do(self, problem, pop, **kwargs): return pop", "problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy class which can be", "<filename>pymoo/model/repair.py from abc import abstractmethod class Repair: \"\"\" This class is allows to", "import abstractmethod class Repair: \"\"\" This class is allows to repair individuals after", "which can be used to simply do no repair. \"\"\" def do(self, problem,", "**kwargs): pass class NoRepair(Repair): \"\"\" A dummy class which can be used to", "crossover if necessary. \"\"\" def do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs)", "pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A", "Repair: \"\"\" This class is allows to repair individuals after crossover if necessary.", "be used to simply do no repair. \"\"\" def do(self, problem, pop, **kwargs):", "\"\"\" def do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self,", "do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop,", "after crossover if necessary. \"\"\" def do(self, problem, pop, **kwargs): return self._do(problem, pop,", "pass class NoRepair(Repair): \"\"\" A dummy class which can be used to simply", "class is allows to repair individuals after crossover if necessary. \"\"\" def do(self,", "**kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy", "_do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy class which can", "pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy class which can be used", "to repair individuals after crossover if necessary. \"\"\" def do(self, problem, pop, **kwargs):", "class NoRepair(Repair): \"\"\" A dummy class which can be used to simply do", "if necessary. \"\"\" def do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod", "used to simply do no repair. 
\"\"\" def do(self, problem, pop, **kwargs): return", "def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\" A dummy class which", "\"\"\" A dummy class which can be used to simply do no repair.", "**kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass class", "self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair): \"\"\"", "necessary. \"\"\" def do(self, problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def", "allows to repair individuals after crossover if necessary. \"\"\" def do(self, problem, pop,", "pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass", "abc import abstractmethod class Repair: \"\"\" This class is allows to repair individuals", "abstractmethod class Repair: \"\"\" This class is allows to repair individuals after crossover", "return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs): pass class NoRepair(Repair):", "can be used to simply do no repair. \"\"\" def do(self, problem, pop,", "from abc import abstractmethod class Repair: \"\"\" This class is allows to repair", "problem, pop, **kwargs): return self._do(problem, pop, **kwargs) @abstractmethod def _do(self, problem, pop, **kwargs):", "NoRepair(Repair): \"\"\" A dummy class which can be used to simply do no" ]
[ "\"\"\" DAO Object for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert as OraBlockSiteInsert", "for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert as OraBlockSiteInsert class Insert(OraBlockSiteInsert): pass", "#!/usr/bin/env python \"\"\" DAO Object for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert", "Object for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert as OraBlockSiteInsert class Insert(OraBlockSiteInsert):", "DAO Object for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert as OraBlockSiteInsert class", "python \"\"\" DAO Object for BlockSite table \"\"\" from dbs.dao.Oracle.BlockSite.Insert import Insert as" ]
[ "[int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\")", "temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] =", "\"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"]", "+ \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0,", "== \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print", "] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"]", "= [] for fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl'", "df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] ==", "ratio return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd() +", "+ 'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else : df", "glob from re import sub from sys import argv, exit from os.path import", ": temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"]", "(df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\"", "= \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure()", "import glob from re import sub from sys import argv, exit from os.path", "glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list : temp=pd.read_csv(fn)", "name[6]) & (df.error == 0) & (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean()", "# print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\",", "= \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\")", "plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list = []", "== \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"]", "* temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list", "\"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"]", "temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] *", "= len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'],", "seaborn as sns from glob import glob from re import sub from sys", "borderaxespad=0.) 
plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\")", "temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[", "#grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\")", "temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])]", "os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn = tracer", "argv, exit from os.path import splitext import numpy as np import os def", "loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list = [] fn_list +=", "pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list +=", "= [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] *", "* df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) :", "/ zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df) df_fn = os.getcwd()", "zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df) df_fn = os.getcwd() +", "+ os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if not", "1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list = [] fn_list", "hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) : # print df3", "import seaborn as sns from glob import glob from re import sub from", "row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid = grid.map(plt.scatter,", "fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list", "zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio", "values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] ==", "(df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer == name[0])", "= zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] =", "tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\",", "qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean =", "& (df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer ==", "qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df =", "temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz'", "palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.savefig(out_fn) def get_qc_metrics(): fn_list = []", ": sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0]) & (df.analysis ==", "== 0) & (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean()", "def get_qc_metrics(): fn_list = [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list +=", "df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep +", "glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print fn args", "os.path import splitext import numpy as np import os def load(fn): df=pd.read_csv(fn) return(df)", "df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\",", "[\"fdg\"] * temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0]", "(df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\"", "== \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[", "df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] ==", "'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] *", "+= glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn", "temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list +=", "# break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid =", "df_list = [] for fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if", "palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid", "'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer", "\"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ]", "get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean", "= sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\",", "from sys import argv, exit from os.path import splitext import numpy as np", "temp.shape[0] df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"]", "idx] = ratio return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn =", "fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"]", "qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = 
df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"]", "(df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\"", "return(df) def plot(df0, df, tracer) : out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\",", "in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0]", "[temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] =", "[] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\")", "#fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print", "\"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"]", "temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] =", "\"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0]", "else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn)", "in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0]", "os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if", "* temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list)", "name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df[\"sub\"] == name[5])", "y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5)", "* temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif", "[] for fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in", "fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"]", "\"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\"", "\"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ]", "re import sub from sys import argv, exit from os.path import splitext import", "fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = []", ": temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] =", "elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0]", "df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] =", "def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn = tracer +", "== name[6]) & (df.error == 0) & (df[\"sub\"] == name[5]) ] values =", "import pandas as pd import matplotlib 
matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn", "df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics()", "= [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df) def get_error():", "def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list", "== name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values /", "\"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ]", "df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax =", "== name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df[\"sub\"] ==", "& (df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6]) &", "& idx] = ratio return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn", "name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis", "(df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\"", ": temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] =", ": temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"]", "\"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ]", "df3 in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break", "from re import sub from sys import argv, exit from os.path import splitext", "+= [temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list", "+= glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list : temp=pd.read_csv(fn) args =", "sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\",", "matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns from glob import", "hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid", "palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter,", ": out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6,", "jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2,", "\"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index =", "from os.path import splitext import numpy as np import os def load(fn): df=pd.read_csv(fn)", "os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn)", "sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", 
sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid =", "= pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[", "as plt import seaborn as sns from glob import glob from re import", "& (df.task == name[4]) & (df.roi == name[6]) & (df.error == 0) &", "== name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) & (df.task ==", "+= glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list :", "(df.analysis == name[1]) & (df.ses == name[3]) & (df.task == name[4]) & (df.roi", "splitext import numpy as np import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0,", "fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list", "len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True,", "#ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\",", "= [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] *", "ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0]) & (df.analysis == name[1]) &", "as sns from glob import glob from re import sub from sys import", "== name[5]) zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) &", "df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] =", "df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer =", "== name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer == name[0]) &", "plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\",", "exit from os.path import splitext import numpy as np import os def load(fn):", "glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if", "df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] ==", "palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) : # print df3 #", "df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] =", "= \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\")", "matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns from glob import glob", "[args[1]] * temp.shape[0] #temp[\"error\"] = 
[int(args[2])] * temp.shape[0] df_list += [temp] df =", "sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\",", "(df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\"", "not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if", "df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) &", "markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list", "[\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0]", "sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True,", "import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns from glob", "nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean)", "os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn)", "glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in", "temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"]", "[\"rcl\"] * temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0]", "name[5]) zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses", "df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] ==", "#temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df) def", "in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in fn :", "\"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"]", "name[2]) & idx] = ratio return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv'", "temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list +=", "== \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") &", "hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05,", "df_list += [temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\")", "if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in", "plt import seaborn as sns from glob import glob from re import sub", "sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) :", "== \"mean\")] = \"BPnd\" df[\"metric\"].loc[ 
(df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] =", "] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] ==", "temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]]", ": df = get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn)", "dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\",", "= df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2])", "= get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean()", "df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] =", "glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\")", "(df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\")", "fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] *", "* temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list", "+= [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")]", "== \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] =", "os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else :", "matplotlib.pyplot as plt import seaborn as sns from glob import glob from re", "+ 'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) :", "name, df3 in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") #", "\"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis',", "pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\")", "not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc)", "* temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list", "& (df.roi == name[6]) & (df.error == 0) & (df[\"sub\"] == name[5]) ]", "data=df_mean) #for name, df3 in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3,", "df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] =", "= [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn", "pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc =", "row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", 
dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5)", "join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list =", "#temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])]", "= pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc", "pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"]", ": temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] =", "get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list =", "= \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str,", "df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df) df_fn = os.getcwd() + os.sep", "y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\",", "temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] =", "= \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\")", "col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) : # print", "= [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list +=", "df = get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) :", "print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"]", "import splitext import numpy as np import os def load(fn): df=pd.read_csv(fn) return(df) def", "\"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"]", "] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] ==", "df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] *", "= os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv'", "== \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\" df_mean[\"analysis\"].loc[", "import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn =", "df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub',", "\"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\"", "df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += 
glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for", "== \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ]", "(df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique())", "'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0])", "y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub']) : #", "df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn = tracer + \".png\" plt.clf()", "* temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list)", "(df.task == name[4]) & (df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df =", "= ratio return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd()", "& (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] ==", "name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4]", "df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"]", "= get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc", "data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list", "#for name, df3 in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\")", "import sub from sys import argv, exit from os.path import splitext import numpy", "(df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df[\"sub\"]", "& (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\"", "] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] ==", "args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0]", "\"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"]", "zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn)", "sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.savefig(out_fn) def", "= df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[", "\"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean", "name[4]) & (df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer", "(df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0]", "(df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df.error", "sub from sys import argv, exit from os.path import splitext import numpy as", "= [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df", "df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) &", "for fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn", "as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns", "return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\")", "'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] *", "fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn :", "fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"]", "'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer ==", "== \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[", "= values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df) df_fn", "temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list +=", "df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\"", "= range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses',", "temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"]", "print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ]", "= [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] *", "break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean,", "in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"]", "'appian_error.csv' qc_fn = os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df", "zeros = zeros_df[\"value\"].mean() ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx]", "inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", 
palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in df2.groupby(['sub'])", "#temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp]", "sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0]) & (df.analysis == name[1])", "plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list", "in fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn", "+= glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list :", ": temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] =", "df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] =", "name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df.error == 0)", "= \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\")", "df, tracer) : out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df,", "else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index()", "for fn in fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl'", "df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] ==", "idx = (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3])", "[args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df =", ": qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[", "== name[4]) & (df.roi == name[6]) & (df.error == 0) & (df[\"sub\"] ==", "glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn)", "fn_list = [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list", "+ os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else", "pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as", "[0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0]", "sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\",", "in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in fn :", "glob import glob from re import sub from sys import argv, exit from", "'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0]) & 
(df.analysis", "for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3]", "if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn)", "df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3", "= [\"fdg\"] * temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] *", "== \"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for", "df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\"", "'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in fn", "df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error',", "[int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[]", "+= glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\")", "'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else : df =", "temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] =", "= pd.read_csv(qc_fn) print(qc) exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] ==", "get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc =", "= \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[", "* temp.shape[0] df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] == \"fmz\") &", "name[4]) & (df.roi == name[6]) & (df.error == 0) & (df[\"sub\"] == name[5])", "range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task',", "& (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio =", "(df.error == 0) & (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros =", "name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values / zeros", "\"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] ==", "y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df, palette=\"Set2\", dodge=True)", "(df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\"", "nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\",", "\"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"]", "== name[2]) & idx] = ratio return(df) df_fn = 
os.getcwd() + os.sep +", "= pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list", "columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name, df3 in", "alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx = (df.tracer == name[0]) &", "return(df) df_fn = os.getcwd() + os.sep + 'appian_error.csv' qc_fn = os.getcwd() + os.sep", "col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid = grid.map(plt.scatter, \"groundtruth\",", "name[1]) & (df.ses == name[3]) & (df.task == name[4]) & (df.roi == name[6])", "import numpy as np import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df,", "tracer) : out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True,", "* temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"]", "df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\")", "zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses ==", "== \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index", "ratio = values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df)", "load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn = tracer + \".png\"", "\"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"]", "df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\",", "fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"]", "# sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\", hue=\"analysis\", data=df,", "data=df, palette=\"Set2\", dodge=True) #grid = sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid =", "\"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"},", "df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2]", "+= glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print fn", "task=name[4] error=name[2] idx = (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses", "= fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] 
elif", "& (df.task == name[4]) & (df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df", "\"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"]", "values / zeros df[\"%Accuracy\"].loc[(df[\"error\"] == name[2]) & idx] = ratio return(df) df_fn =", "] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] ==", "= df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\")", "os.path.exists(df_fn) : df = get_error() df.to_csv(df_fn) else : df = pd.read_csv(df_fn) if not", "fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg'", "plot(df0, df, tracer) : out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\",", "= (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) &", "import argv, exit from os.path import splitext import numpy as np import os", "== name[4]) & (df.roi == name[6]) & (df[\"sub\"] == name[5]) zeros_df = df.loc[", "[\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0]", "elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0]", "sys import argv, exit from os.path import splitext import numpy as np import", "len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for", "def plot(df0, df, tracer) : out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\",", "palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid = grid.map(plt.scatter, \"groundtruth\", \"value\") plt.savefig(\"appian_error.png\")", "(df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] == \"mean\")]", "\"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name,", "\"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ]", "] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf()", "os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0)", "(df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\") ] = \"FMZ\"", "= \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\")", "(df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio = values", "print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\", y=\"diff\", row=\"roi\",", "sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid = grid.map(plt.scatter, \"groundtruth\", \"value\")", "& (df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis ==", "(df.task == name[4]) & (df.roi == name[6]) & (df.error == 0) & (df[\"sub\"]", "== \"fdg\") & 
(df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") &", "df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx =", "(df.roi == name[6]) & (df.error == 0) & (df[\"sub\"] == name[5]) ] values", "df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] = \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ]", "temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"] =", "fn in fn_list : temp=pd.read_csv(fn) print fn args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in", ": # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax = sns.factorplot(x=\"roi\",", "qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean =", "\"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ]", "if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn)", "temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) return(df)", "= [\"rcl\"] * temp.shape[0] elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] *", "elif 'fdg' in fn : temp[\"tracer\"] = [\"fdg\"] * temp.shape[0] elif 'fmz' in", "] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0", "* temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"]", "fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn : temp[\"tracer\"] =", "get_qc_metrics(): fn_list = [] df_list = [] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\")", "\"PVC\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer", "(df[\"sub\"] == name[5]) zeros_df = df.loc[ (df.tracer == name[0]) & (df.analysis == name[1])", "df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5]", "= [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df", "0) & (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros = zeros_df[\"value\"].mean() ratio", "y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.savefig(out_fn) def get_qc_metrics():", "numpy as np import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer)", "== name[3]) & (df.task == name[4]) & (df.roi == name[6]) & (df.error ==", "== \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] = \"GM\" df[\"roi\"].loc[", "] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1", "i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\", hue=\"tracer\", palette=\"muted\",kind=\"swarm\",col_order=['Coregistration','PVC','TKA'], sharey=True, data=df_mean) #for name,", "= os.getcwd() + os.sep + 'appian_qc.csv' if not os.path.exists(df_fn) : df = get_error()", "(df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) & (df.task", "* temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"]", "] = \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index = range(df.shape[0])", "== \"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI=", "scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.savefig(out_fn) def get_qc_metrics(): fn_list = [] df_list =", "exit(0) df_mean = df.groupby([\"analysis\",\"tracer\",\"error\",\"errortype\",\"frame\",\"metric\",\"roi\"])[\"%Accuracy\"].mean() df_mean = df_mean.reset_index() df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"rcl\") ] =", "[0] * df.shape[0] for name, df0 in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi'])", "== name[1]) & (df.ses == name[3]) & (df.task == name[4]) & (df.roi ==", "[temp] df = pd.concat(df_list) return(df) def get_error(): fn_list=[] fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list +=", "from glob import glob from re import sub from sys import argv, exit", "print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True)", "(df[\"tracer\"] == \"fmz\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\")", "[] fn_list += glob(\"raclopride/out_rcl/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"scott/out_fdg/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") #fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in", "= df.loc[ (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses == name[3])", "#fn_list += glob(\"fmz/out_fmz/groupLevelQC/coreg_roc/test_group_qc_auc.csv\") for fn in fn_list : temp=pd.read_csv(fn) print fn args =", "fn in fn_list : temp=pd.read_csv(fn) args = fn.split(\"/\")[4].split(\"_\") if 'rcl' in fn :", "* temp.shape[0] elif 'fmz' in fn : temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] temp[\"frame\"]", "& (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\") & (df[\"metric\"] ==", "df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else :", "plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique()) i=1 df.rename(index=str, columns={\"roi\":\"ROI\",\"analysis\":\"Analysis\",\"tracer\":\"Radiotracer\"}, inplace=True) 
sns.factorplot(x=\"error\", y=\"%Accuracy\", col=\"analysis\",", "[0] * temp.shape[0] #temp[\"errortype\"] = [args[1]] * temp.shape[0] #temp[\"error\"] = [int(args[2])] * temp.shape[0]", "np import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) : out_fn", "plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\",", "(df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[ (df[\"tracer\"] == \"rcl\")", "= \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")] = \"Ki\" df[\"metric\"].loc[", "= \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]= [0] * df.shape[0] for name, df0 in", "] = \"FMZ\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] ==", "(df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"metric\"].loc[ (df[\"tracer\"] == \"fdg\") & (df[\"metric\"] == \"mean\")]", "in df.groupby(['tracer','analysis', 'error', 'ses', 'task', 'sub', 'roi']) : sub=name[5] ses=name[3] task=name[4] error=name[2] idx", "(df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[", "\"rcl\") & (df[\"metric\"] == \"mean\")] = \"BPnd\" df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] =", "temp[\"errortype\"] = [args[1]] * temp.shape[0] temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp]", "pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns from", "\".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False,", ": qc = get_qc_metrics() qc.to_csv(qc_fn) else : qc = pd.read_csv(qc_fn) print(qc) exit(0) df_mean", "& (df.analysis == name[1]) & (df.ses == name[3]) & (df.task == name[4]) &", "temp[\"tracer\"] = [\"fmz\"] * temp.shape[0] #temp[\"frame\"] = [0] * temp.shape[0] #temp[\"errortype\"] = [args[1]]", "fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for fn in fn_list : temp=pd.read_csv(fn) args", "data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\", y=\"value\", data=df0, join=False, palette=\"dark\", markers=\"d\", scale=1.5) plt.legend(bbox_to_anchor=(1.05, 1),", "in df2.groupby(['sub']) : # print df3 # sns.swarmplot(x=\"roi\", y='groundtruth',data=df3, palette=\"bright\") # break #ax", "= tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1) sns.pointplot(x=\"roi\",", "= sns.FacetGrid(df_mean, row=\"tracer\", col=\"analysis\", sharey=True, palette=\"muted\", size=5) #grid = grid.map(plt.scatter, \"roi\", \"value\") #grid", "= \"GM\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fdg\") ] = \"GM\" df.index = range(df.shape[0]) df[\"%Accuracy\"]=", "== \"tka\") ] = \"TKA\" df_mean[\"analysis\"].loc[ (df_mean[\"analysis\"] == \"pvc\") ] = \"PVC\" df_mean[\"analysis\"].loc[", "import matplotlib.pyplot as plt import seaborn as sns from glob import glob from", "name[0]) & (df.analysis == name[1]) & (df.ses == name[3]) & (df.task == name[4])", "as np import os def load(fn): df=pd.read_csv(fn) return(df) def plot(df0, df, tracer) :", "error=name[2] idx = (df.tracer == name[0]) & (df.analysis == name[1]) & (df.ses ==", "= [int(args[2])] * temp.shape[0] df_list += [temp] df = 
pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"] ==", "out_fn = tracer + \".png\" plt.clf() sns.stripplot(x=\"roi\", y=\"value\", hue=\"sub\", data=df, jitter=True, alpha=.6, zorder=1)", "= [\"fmz\"] * temp.shape[0] temp[\"frame\"] = [0] * temp.shape[0] temp[\"errortype\"] = [args[1]] *", "sns from glob import glob from re import sub from sys import argv,", "fn_list += glob(\"fmz/out_fmz/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"scott/out_fdg/preproc/_args_**/_angle_**/**/*_3d.csv\") fn_list += glob(\"raclopride/out_rcl/preproc/_args_**/_angle_**/**/*_3d.csv\") df_list = [] for", ": df = pd.read_csv(df_fn) if not os.path.exists(qc_fn) : qc = get_qc_metrics() qc.to_csv(qc_fn) else", "& (df.error == 0) & (df[\"sub\"] == name[5]) ] values = df0[\"value\"].mean() zeros", "'rcl' in fn : temp[\"tracer\"] = [\"rcl\"] * temp.shape[0] elif 'fdg' in fn", "= \"RCL\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fdg\") ] = \"FDG\" df_mean[\"tracer\"].loc[ (df_mean[\"tracer\"] == \"fmz\")", "\"pet-coregistration\") ] = \"Coregistration\" print df_mean plt.clf() plt.figure() nTracer = len(df[\"tracer\"].unique()) nROI= len(df[\"analysis\"].unique())", "temp[\"error\"] = [int(args[2])] * temp.shape[0] df_list += [temp] df = pd.concat(df_list) df[\"metric\"].loc[ (df[\"tracer\"]", "df[\"roi\"].loc[ (df[\"tracer\"] == \"rcl\") ] = \"Putamen\" df[\"roi\"].loc[ (df[\"tracer\"] == \"fmz\") ] =" ]
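# --- Illustration (not part of the original analysis; a minimal sketch) ---
# get_error() normalises each group's mean "value" by the mean of the
# matching error == 0 rows, so "%Accuracy" is a ratio against the
# error-free baseline. The tiny frame below is invented purely to show
# that arithmetic in isolation.
demo = pd.DataFrame({'error': [0, 0, 2, 2],
                     'value': [10.0, 12.0, 8.0, 9.0]})
baseline = demo.loc[demo['error'] == 0, 'value'].mean()  # (10 + 12) / 2 = 11.0
demo['%Accuracy'] = demo['value'] / baseline             # e.g. 8.0 / 11.0 ~= 0.727
#print(demo)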
[ "c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4", "= \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', #", "iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7", "c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe", "iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7", "} _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5':", "'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed',", "# caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', #", "in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in", "'\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce':", "# ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', #", "in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in", "in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in", "d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe", "in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in", "'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re", "%(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month': month,", "<reponame>verhovsky/feedparser<filename>feedparser/datetimes/greek.py from __future__ import absolute_import, unicode_literals import re from .rfc822 import _parse_date_rfc822 #", "'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar',", "'\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9':", "'\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc':", "# cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', #", "m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\\ 'zonediff': m.group(8)}", "ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9", "# d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', #", "# cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', #", "date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7", "import _parse_date_rfc822 # Unicode strings for Greek date strings _greek_months = \\ {", "'\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3':", 
"_greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date =", "{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second':", "c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3", "cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea", "'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun',", "iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7", "for Greek date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed", "# c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', #", "'''Parse a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString)", "%(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month': month, 'year':", "Unicode strings for Greek date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan',", "\\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7", "iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \\ { \\", "'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec',", "wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s", "iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7", "c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb", "# cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', #", "in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)')", "strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2':", "# d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re =", "_greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek", "\\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date", "8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)]", "'\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1':", "format.''' m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month =", "in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in", "import re from .rfc822 import _parse_date_rfc822 # Unicode strings for Greek date strings", "c4e5ea in 
iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1", "_greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday,", "if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s,", "cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded", "'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue',", "c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4", "'\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce':", "'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri',", "iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7", "'\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3':", "= _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s'", "= \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit", "m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)]", "%(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5),", "date format.''' m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month", "import absolute_import, unicode_literals import re from .rfc822 import _parse_date_rfc822 # Unicode strings for", "iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7", "_greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' %", "# d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', #", "\\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7", "ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1", "_parse_date_rfc822 # Unicode strings for Greek date strings _greek_months = \\ { \\", "# cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', #", "# c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', #", "iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7", "rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day':", "'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct',", "'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov',", "# c9effded in iso-8859-7 
'\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', #", "# cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays =", "# d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', #", "'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul',", "'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May',", "'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May',", "in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in", "'\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 }", "c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa", "'\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1':", "iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7", "'\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9':", "in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in", "'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse", "a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if", "d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd", "in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in", "iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7", "in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in", "in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in", "'\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9':", "'\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2':", "month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\", "d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \\", "# d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', #", "re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.'''", "cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9", "'\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 
'\\u0394\\u03b5\\u03ba':", "in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in", "iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7", "# d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', #", "in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in", "% \\ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute':", "'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug',", "'\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb':", "= \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', #", "# ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', #", "iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7", "'\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4':", "# cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', #", "'\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 }", "d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string", "# Unicode strings for Greek date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd':", ".rfc822 import _parse_date_rfc822 # Unicode strings for Greek date strings _greek_months = \\", "'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\\ 'zonediff': m.group(8)} return", "# c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', #", "Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return wday =", "iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7", "'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr',", "'\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb':", "in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in", "'\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0':", "iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7", "from .rfc822 import _parse_date_rfc822 # Unicode strings for Greek date strings _greek_months =", "according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m:", "c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0", "iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 
'Oct', # cfeaf4 in iso-8859-7", "d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1", "'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu',", "'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\\ 'zonediff':", "{ \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in", "in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in", "iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to", "Greek date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in", "%(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour':", "\\ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6),", "in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according", "# c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', #", "c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb", "in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in", "wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\\", "\\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2", "in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in", "# c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', #", "_parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m =", "'\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4':", "strings for Greek date strings _greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', #", "'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun',", "month, 'year': m.group(4),\\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\\ 'zonediff': m.group(8)} return _parse_date_rfc822(rfc822date)", "'\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca':", "in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in", "caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9", "in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in", "absolute_import, unicode_literals import re from .rfc822 import _parse_date_rfc822 # Unicode strings for Greek", "# c4e5f5 in iso-8859-7 '\\u03a4\\u03c1\\u03b9': 'Tue', # d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', #", 
"iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7", "# c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', #", "'\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5':", "in iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in", "= _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date", "'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug',", "c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3", "'\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad':", "'Thu', # d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat',", "# d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a", "in iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in", "{ \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb', # d6e5e2 in", "in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in", "'\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString):", "to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return", "'%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month':", "iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7", "string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not", "iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7", "cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed", "'\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd':", "= _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday':", "cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov', # cdefdd in iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5", "__future__ import absolute_import, unicode_literals import re from .rfc822 import _parse_date_rfc822 # Unicode strings", "%(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\\", "'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep',", "cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \\", "'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in 
iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul',", "iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def", "} _greek_date_format_re = \\ re.compile(r'([^,]+),\\s+(\\d{2})\\s+([^\\s]+)\\s+(\\d{4})\\s+(\\d{2}):(\\d{2}):(\\d{2})\\s+([^\\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a", "unicode_literals import re from .rfc822 import _parse_date_rfc822 # Unicode strings for Greek date", "not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s", "'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun',", "'Feb', # d6e5e2 in iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar',", "'Sep', # d3e5f0 in iso-8859-7 '\\u039f\\u03ba\\u03c4': 'Oct', # cfeaf4 in iso-8859-7 '\\u039d\\u03bf\\u03ad': 'Nov',", "return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s", "= '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \\ {'wday': wday, 'day': m.group(2),", "iso-8859-7 '\\u0391\\u03c0\\u03c1': 'Apr', # c1f0f1 in iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7", "'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7 '\\u039c\\u03b1\\u03b9': 'May',", "iso-8859-7 '\\u039c\\u03ac\\u03ce': 'Mar', # ccdcfe in iso-8859-7 '\\u039c\\u03b1\\u03ce': 'Mar', # cce1fe in iso-8859-7", "in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', # c9f9eb in iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in", "def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m", "# c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7 '\\u0399\\u03bf\\u03bb': 'Jul', #", "# c4e5ea in iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', #", "from __future__ import absolute_import, unicode_literals import re from .rfc822 import _parse_date_rfc822 # Unicode", "iso-8859-7 '\\u039c\\u03ac\\u03b9': 'May', # ccdce9 in iso-8859-7 '\\u039c\\u03b1\\u03ca': 'May', # cce1fa in iso-8859-7", "re from .rfc822 import _parse_date_rfc822 # Unicode strings for Greek date strings _greek_months", "d0e5ec in iso-8859-7 '\\u03a0\\u03b1\\u03c1': 'Fri', # d0e1f1 in iso-8859-7 '\\u03a3\\u03b1\\u03b2': 'Sat', # d3e1e2", "_greek_months = \\ { \\ '\\u0399\\u03b1\\u03bd': 'Jan', # c9e1ed in iso-8859-7 '\\u03a6\\u03b5\\u03b2': 'Feb',", "in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7 '\\u03a3\\u03b5\\u03c0': 'Sep', # d3e5f0 in", "iso-8859-7 '\\u039d\\u03bf\\u03b5': 'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7", "'Nov', # cdefe5 in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays", "_greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon',", "d4f1e9 in iso-8859-7 '\\u03a4\\u03b5\\u03c4': 'Wed', # d4e5f4 in iso-8859-7 '\\u03a0\\u03b5\\u03bc': 'Thu', # d0e5ec", "'\\u039c\\u03b1\\u03b9': 'May', # cce1e9 in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bd': 'Jun', # c9effded in iso-8859-7 '\\u0399\\u03bf\\u03bd':", "iso-8859-7 '\\u0391\\u03cd\\u03b3': 'Aug', # c1fde3 in iso-8859-7 '\\u0391\\u03c5\\u03b3': 'Aug', # c1f5e3 in iso-8859-7", "a Greek 8-bit date format.''' m = 
_greek_date_format_re.match(dateString) if not m: return wday", "iso-8859-7 '\\u0399\\u03bf\\u03bd': 'Jun', # c9efed in iso-8859-7 '\\u0399\\u03bf\\u03cd\\u03bb': 'Jul', # c9effdeb in iso-8859-7", "\\ { \\ '\\u039a\\u03c5\\u03c1': 'Sun', # caf5f1 in iso-8859-7 '\\u0394\\u03b5\\u03c5': 'Mon', # c4e5f5", "in iso-8859-7 '\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \\ {", "'\\u0394\\u03b5\\u03ba': 'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \\ { \\ '\\u039a\\u03c5\\u03c1':", "m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s" ]
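# A minimal usage sketch of _parse_date_greek: the parser swaps the Greek
# weekday/month tokens for their English equivalents and delegates the rest
# to the existing RFC 822 parser. The sample string and expected result below
# are illustrative assumptions, not values taken from this module or its tests:
#
#     greek = '\u039a\u03c5\u03c1, 11 \u0399\u03bf\u03cd\u03bd 2006 23:50:11 +0300'
#     parsed = _parse_date_greek(greek)
#     # '\u039a\u03c5\u03c1' -> 'Sun' and '\u0399\u03bf\u03cd\u03bd' -> 'Jun',
#     # so _parse_date_rfc822 receives 'Sun, 11 Jun 2006 23:50:11 +0300';
#     # assuming it normalizes to UTC, parsed[:6] == (2006, 6, 11, 20, 50, 11).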
[ "], options={ 'abstract': False, }, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),", "to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ),", "('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={", "migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')],", "primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects',", "migrations.AddField( model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'),", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid", "field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject',", "), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()),", "models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects',", "max_length=20), ), migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')],", "}, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject', fields=[", "('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField(", "3.0 on 2019-12-15 02:01 
import core.models from django.conf import settings from django.db import", "# Generated by Django 3.0 on 2019-12-15 02:01 import core.models from django.conf import", "models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'],", "models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')),", "'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)), ], ), migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True,", "migrations.RemoveField( model_name='user', name='location', ), migrations.AddField( model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user', name='type',", "models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField(", "name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'),", "2019-12-15 02:01 import core.models from django.conf import settings from django.db import migrations, models", "name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)),", "('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user',", "('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ],", "(core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for", "('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users',", "field=models.CharField(blank=True, max_length=20), ), 
migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'],", "], ), migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date',", "), migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)),", "('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')],", "models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)),", "Generated by Django 3.0 on 2019-12-15 02:01 import core.models from django.conf import settings", "False, }, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject',", "to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='flight',", "= [ migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'),", "migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera',", "migrations.CreateModel( name='DemoProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted',", "to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact', name='flight',", "('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'),", "models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)), ], ), migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False,", "models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', 
models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'],", "('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='flight', name='user',", "('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'),", "editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights',", "import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations", "'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'],", "models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations',", "models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state',", "uuid class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel(", "('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)),", "models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)),", "('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract':", "models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', 
to='core.Artifact')),", "migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4,", "editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')],", "name='birth_date', ), migrations.RemoveField( model_name='user', name='location', ), migrations.AddField( model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField(", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True,", "'Shapefile')], max_length=20)), ], ), migrations.CreateModel( name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name',", "by Django 3.0 on 2019-12-15 02:01 import core.models from django.conf import settings from", "migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'),", "primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)),", "'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField(", "('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False,", "('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact', name='flight', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "02:01 import core.models from django.conf import settings from django.db import migrations, models import", "'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name',", "class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='Artifact',", "django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)), ], ), migrations.CreateModel(", "serialize=False)), ('name', models.CharField(max_length=50)), ('description', 
models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')),", "'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True,", "= [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True,", "import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration):", "name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)),", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),", "), migrations.AddField( model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'],", "models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact',", "serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)), ], ), migrations.CreateModel( name='Flight', fields=[", "(core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False,", "max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description',", "('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'),", "migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20),", "migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted',", 
"to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, },", "'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4,", "(core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ),", "], options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact', name='flight', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'), ),", "import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies = [ ('core',", "related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True,", "max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'],", "[ migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'],", "options={ 'abstract': False, }, ), migrations.AddField( model_name='flight', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ),", "'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField( model_name='user', name='birth_date', ), migrations.RemoveField(", "from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies =", "operations = [ migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'],", "'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField( model_name='user', name='birth_date',", "), migrations.AddField( model_name='user', name='type', field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'],", "max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField( model_name='user', name='birth_date', ), migrations.RemoveField( model_name='user',", "name='bio', ), migrations.RemoveField( model_name='user', name='birth_date', ), migrations.RemoveField( 
model_name='user', name='location', ), migrations.AddField( model_name='user', name='organization',", "primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects',", "for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ),", "dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='Artifact', fields=[ ('id',", "fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts',", "('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'),", "options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact', name='flight', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'), ), ]", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)), ],", "images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField(", "settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies", "to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('description',", "[ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='Artifact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')), ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)), ],", "('description', models.TextField()), ('deleted', models.BooleanField(default=False)), ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects',", "import core.models from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "(core.models.FlightState['PAUSED'], 'Paused'), 
(core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField( model_name='user',", "to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='artifact', name='flight', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'),", "default=core.models.UserType['DEMO_USER'], max_length=20), ), migrations.CreateModel( name='UserProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)),", "serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing',", "name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='DemoProject', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True,", "models.ManyToManyField(related_name='user_projects', to='core.Artifact')), ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False,", "model_name='user', name='bio', ), migrations.RemoveField( model_name='user', name='birth_date', ), migrations.RemoveField( model_name='user', name='location', ), migrations.AddField( model_name='user',", "name='Flight', fields=[ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('date', models.DateField(auto_now_add=True)), ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'],", "on 2019-12-15 02:01 import core.models from django.conf import settings from django.db import migrations,", "], ), migrations.RemoveField( model_name='user', name='bio', ), migrations.RemoveField( model_name='user', name='birth_date', ), migrations.RemoveField( model_name='user', name='location',", "), migrations.RemoveField( model_name='user', name='location', ), migrations.AddField( model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user',", "'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)), ], ), migrations.RemoveField( model_name='user', name='bio',", "RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)), ('multispectral_processing', models.BooleanField(default=False)), ('annotations', models.TextField()), ('deleted', models.BooleanField(default=False)), ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting", "models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ]", "import uuid class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [", "model_name='user', name='organization', field=models.CharField(blank=True, max_length=20), ), migrations.AddField( model_name='user', name='type', 
# Generated by Django 3.0 on 2019-12-15 02:01

import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Artifact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Flight',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('date', models.DateField(auto_now_add=True)),
                ('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)),
                ('multispectral_processing', models.BooleanField(default=False)),
                ('annotations', models.TextField()),
                ('deleted', models.BooleanField(default=False)),
                ('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)),
            ],
        ),
        migrations.RemoveField(
            model_name='user',
            name='bio',
        ),
        migrations.RemoveField(
            model_name='user',
            name='birth_date',
        ),
        migrations.RemoveField(
            model_name='user',
            name='location',
        ),
        migrations.AddField(
            model_name='user',
            name='organization',
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AddField(
            model_name='user',
            name='type',
            field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20),
        ),
        migrations.CreateModel(
            name='UserProject',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('deleted', models.BooleanField(default=False)),
                ('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')),
                ('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='flight',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.CreateModel(
            name='DemoProject',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('deleted', models.BooleanField(default=False)),
                ('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')),
                ('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')),
                ('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='artifact',
            name='flight',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'),
        ),
    ]
[ "-w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; #", "in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs)", "-numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate", "cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray,", "negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5", "numpy as np import cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray =", "do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h", "# $ done # $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg", "-maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done # $ opencv_traincascade -data", "-bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate", "(img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image", "cv2.destroyAllWindows() # # show image thats being collected # $ for filename in", "-maxyangle 0.3 -maxxangle 0.3; # $ done # $ opencv_traincascade -data Classifier -vec", "cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges:", "cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)", "-h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done", "oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs", "-img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec", "-vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done # $", "(x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5))", "cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15,", "0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5,", "5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being collected # $", "being collected # $ for filename in Positives/*.jpg; # $ do opencv_createsamples -img", "60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done #", "np import cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done # $ opencv_traincascade", "oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0)", "= cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs =", "-bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle", "25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3", "0.5 -maxyangle 0.3 -maxxangle 0.3; # $ done # $ 
opencv_traincascade -data Classifier", "done # $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos", "Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60", "# show image thats being collected # $ for filename in Positives/*.jpg; #", "= cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150))", "cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges =", "-numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType", "import cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges", "60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize 2048 -precalcIdxBufSize 2048", "= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in", "15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] /", "for filename in Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg negatives.txt -num", "cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being collected # $ for filename", "opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60", "cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being collected # $ for", "cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img,", "filename in Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25", "collected # $ for filename in Positives/*.jpg; # $ do opencv_createsamples -img ${filename}", "# # show image thats being collected # $ for filename in Positives/*.jpg;", "-num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle", "for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] /", "img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being collected", "allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60", "Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255", "/ 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats", "$ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60", "= cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # #", "-data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h", "# $ for filename in Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg", "imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() #", "cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show", "opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11", "= cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, 
cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05,", "1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5", "thats being collected # $ for filename in Positives/*.jpg; # $ do opencv_createsamples", "# $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000", "# $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w", "cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows()", "cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for", "0.3 -maxxangle 0.3; # $ done # $ opencv_traincascade -data Classifier -vec allvecs.vec", "import numpy as np import cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray", "/ 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being collected #", "3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR", "1.05, 15, 0, (150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1]", "$ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages", "as np import cv2 cascade = cv2.CascadeClassifier('cascade.xml') img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img,", "0.3; # $ done # $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt", "show image thats being collected # $ for filename in Positives/*.jpg; # $", "-vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w", "image thats being collected # $ for filename in Positives/*.jpg; # $ do", "11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize 2048", "60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3; # $", "(150,150)) for (x,y,w,h) in oranges: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) imgs = cv2.resize(img, (img.shape[1] / 5, img.shape[0]", "img = cv2.imread('orange.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0,", "in Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor", "gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150)) for (x,y,w,h)", "5, img.shape[0] / 5)) cv2.imshow('img',imgs) cv2.waitKey(0) cv2.destroyAllWindows() # # show image thats being", "$ for filename in Positives/*.jpg; # $ do opencv_createsamples -img ${filename} -bg negatives.txt", "-h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize 2048 -precalcIdxBufSize", "-maxxangle 0.3; # $ done # $ opencv_traincascade -data Classifier -vec allvecs.vec -bg", "-numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize", "${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec", "255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3;", "negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99", "-bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle", "$ done # $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000" ]
[ "+ \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH':", "+ \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if", "str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = {", "data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules,", "import os import pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home", "[ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = { 'default': {", "\"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps =", "], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = {", "else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES':", "pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules", "\"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path,", "home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports =", "\"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\"", "custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else:", "+ \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path =", "abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\"", "data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ],", "= pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home", "= str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home", "custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES =", "} DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(ARMORY_CONFIG['ARMORY_BASE_PATH'], 'db.sqlite3'), } }", "<reponame>3ndG4me/Configs #!/usr/bin/python3 import os import pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home =", "= str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ],", "custom_modules = abolute_home + 
\"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home", "\"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports", "if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) +", "data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\"", "data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules =", "], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3',", "{ 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [", "+ \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) +", "abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute())", "abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path", "custom_webapps, ], } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(ARMORY_CONFIG['ARMORY_BASE_PATH'], 'db.sqlite3'),", "custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path", "abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\"", "os import pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home =", "[ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES", "'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':", "= str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG =", "\"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports,", "[ custom_webapps, ], } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(ARMORY_CONFIG['ARMORY_BASE_PATH'],", "data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG", "= abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path =", "str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS':", "'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = { 'default':", "], } 
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(ARMORY_CONFIG['ARMORY_BASE_PATH'], 'db.sqlite3'), }", "os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute()) + \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [", "str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home +", "= abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home +", "'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], }", "custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps, ], } DATABASES = { 'default': { 'ENGINE':", "= abolute_home + \"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\")", "import pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\"", "\"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps = abolute_home + \"/tools/armory_webapps\" if data_path.exists():", "pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home +", "= \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\" custom_reports = abolute_home + \"/tools/armory_custom/reports\" custom_webapps", "\"/tools/armory_webapps\" if data_path.exists(): data_path = str(pathlib.Path().absolute()) + \"/armory_data\" else: os.mkdir(\"armory_data\") data_path = str(pathlib.Path().absolute())", "ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ],", "= { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS':", "'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [ custom_reports, ], 'ARMORY_CUSTOM_WEBAPPS': [ custom_webapps,", "#!/usr/bin/python3 import os import pathlib data_path = pathlib.Path(str(pathlib.Path().absolute()) + \"/armory_data\") home = str(pathlib.Path.home())", "+ \"/armory_data\" ARMORY_CONFIG = { 'ARMORY_BASE_PATH': data_path, 'ARMORY_CUSTOM_MODULES': [ custom_modules, ], 'ARMORY_CUSTOM_REPORTS': [", "+ \"/armory_data\") home = str(pathlib.Path.home()) abolute_home = \"CHANGEME\" custom_modules = abolute_home + \"/tools/armory_custom/modules\"" ]
[ "attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if we already processed", "(\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"'", "cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block", "minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice =", "str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\",", "= self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\",", "+ str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\",", "traceback import sharedfunc from web3 import Web3 from verification import verifier class TNChecker(object):", "== 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there", "\"' + str(amount) + '\", \"tx error, possible incorrect address\", \"' + str(e)", "self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights", "class TNChecker(object): def __init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node =", "\"senderror\", e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0:", "< 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if", "import sharedfunc from web3 import Web3 from verification import verifier class TNChecker(object): def", "len(targetAddress) > 1: #check if we already processed this tx cursor = self.dbCon.cursor()", "+ txId.hex() + '\", \"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) +", "(\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] + '\",", "SET \"height\" = ' + str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit()", "+ '\", \"' + str(amount) + '\", \"tx error, check exception error\", \"'", "return False def faultHandler(self, tx, error, e=\"\"): #handle transfers to the gateway that", "+ '\", \"' + txId.hex() + '\", \"' + str(round(amount)) + '\", \"'", "error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\",", "+ targetAddress + '\", \"' + transaction['id'] + '\", \"' + txId.hex() +", "result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"' + tx['id'] +", "\"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"' +", "already processed this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed", "def __init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3", "\"' + 
tx['id'] + '\"').fetchall() if len(result) == 0: return True else: self.faultHandler(tx,", "else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei')", "there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if", "+ tx['sender'] + '\", \"' + targetAddress + '\", \"\", \"' + tx['id']", "if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check", "for valid transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in", "1.1) tx = { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce':", "= { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId':", "0 return CurrentBlock def run(self): #main routine to run continuesly print('started checking tn", "self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor =", "'\", \"' + targetAddress + '\", \"\", \"' + tx['id'] + '\", \"'", "that have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if", "error, check exception error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \"", "if len(targetAddress) > 1: #check if we already processed this tx cursor =", "the transaction if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] ==", "/= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES", "heights SET \"height\" = ' + str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"')", "\"' + targetAddress + '\", \"\", \"' + tx['id'] + '\", \"' +", "= config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey =", "executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\",", "= self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx = { 'to':", "for transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if", "= tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor", "import verifier class TNChecker(object): def __init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db')", "Exception as e: self.lastScannedBlock -= 1 print('Something went wrong during tn block iteration:", "= cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"' + tx['id'] + '\"').fetchall()", "if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -=", "else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= 
self.config['erc20']['fee'] amount *= pow(10,", "base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] /", "self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0: self.faultHandler(transaction,", "private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \"", "config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node']))", "/ pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor()", "'\", \"' + targetAddress + '\", \"' + transaction['id'] + '\", \"' +", "self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount", "chain - try/except in case of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height']", "problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error ==", "amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress'])", "as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check the transaction if", "def getCurrentBlock(self): #return current block on the chain - try/except in case of", "cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' +", "return CurrentBlock def run(self): #main routine to run continuesly print('started checking tn blocks", "the gateway that have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr =", "print('started checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True:", "cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock) + ' WHERE \"chain\" =", "'\", \"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send", "#handle transfers to the gateway that have problems amount = tx['amount'] / pow(10,", "the block for valid transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for", "base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if we already processed this tx cursor", "gateway that have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow()", "self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId", "blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock =", "INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender']", "heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): 
#return current block on the chain", "+ '\", \"' + targetAddress + '\", \"' + transaction['id'] + '\", \"'", "gasprice = int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress, 'value': amount, 'gas':", "= int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'],", "nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')):", "ethTxId FROM executed WHERE tnTxId = \"' + tx['id'] + '\"').fetchall() if len(result)", "def checkTx(self, tx): #check the transaction if tx['type'] == 4 and tx['recipient'] ==", "> self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\"", "tx['sender'] + \" - check errors table.\") if error == \"senderror\": targetAddress =", "try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1", "str(amount) + '\", \"tx error, possible incorrect address\", \"' + str(e) + '\")')", "error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\",", "= base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\",", "from tn to other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e)", "'noattachment') return False def faultHandler(self, tx, error, e=\"\"): #handle transfers to the gateway", "\"amount\", \"error\") VALUES (\"' + tx['sender'] + '\", \"\", \"\", \"' + tx['id']", "CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return CurrentBlock", "#check if we already processed this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT", "+ '\", \"tx error, possible incorrect address\", \"' + str(e) + '\")') self.dbCon.commit()", "else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress, 'value': amount,", "if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\",", "'\", \"' + transaction['id'] + '\", \"' + txId.hex() + '\", \"' +", "as e: self.lastScannedBlock -= 1 print('Something went wrong during tn block iteration: ')", "== self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there is an attachment targetAddress", "transfers to the gateway that have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals'])", "tx['assetId'] == self.config['tn']['assetId']: #check if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if", "table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO", "'/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress", "str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock", 
"self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress,", "continue def checkTx(self, tx): #check the transaction if tx['type'] == 4 and tx['recipient']", "+= 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' +", "+ '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return CurrentBlock def run(self): #main", "except: CurrentBlock = 0 return CurrentBlock def run(self): #main routine to run continuesly", "self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\",", "(\"' + tx['sender'] + '\", \"' + targetAddress + '\", \"\", \"' +", "'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx =", "tx['id'] + '\", \"' + str(amount) + '\", \"tx error, possible incorrect address\",", "executed WHERE tnTxId = \"' + tx['id'] + '\"').fetchall() if len(result) == 0:", "\"' + targetAddress + '\", \"' + transaction['id'] + '\", \"' + txId.hex()", "nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice", "errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] + '\",", "+ str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations']", "= ' + str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception", "Web3 from verification import verifier class TNChecker(object): def __init__(self, config): self.config = config", "\"height\" = ' + str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except", "time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the block for valid transactions block", "FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on the", "txId.hex() + '\", \"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")')", "tx, error, e=\"\"): #handle transfers to the gateway that have problems amount =", "'\"').fetchall() if len(result) == 0: return True else: self.faultHandler(tx, 'noattachment') return False def", "\"tx error, possible incorrect address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr +", "'\", \"' + str(amount) + '\", \"tx error, possible incorrect address\", \"' +", "= int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try:", "\"\", \"' + tx['id'] + '\", \"' + str(amount) + '\", \"no attachment", "FROM executed WHERE tnTxId = \"' + tx['id'] + '\"').fetchall() if len(result) ==", "transaction['id'] + '\", \"' + txId.hex() + '\", \"' + str(round(amount)) + '\",", "tokens from tn to other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\",", "\"' + txId.hex() + '\", \"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee'])", "web3 import Web3 from verification import verifier class TNChecker(object): 
def __init__(self, config): self.config", "e=\"\"): #handle transfers to the gateway that have problems amount = tx['amount'] /", "self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check the transaction if tx['type'] ==", "cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock) + '", "Error: no attachment found on transaction from \" + tx['sender'] + \" -", "if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice *", "self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' +", "error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\",", "and tx['assetId'] == self.config['tn']['assetId']: #check if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode()", "self.dbCon.commit() print('send tokens from tn to other network!') self.verifier.verifyOther(txId) except Exception as e:", "self.config['tn']['assetId']: #check if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) >", "\"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"' + targetAddress", "self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount')", "\"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn to other network!')", "+ \" - Error: on outgoing transaction for transaction from \" + tx['sender']", "during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of", "verifier class TNChecker(object): def __init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node", "check exception error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" -", "if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check", "self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return", "nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET", "network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx):", "self.config = config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey", "pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum", "return True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error, e=\"\"): #handle", "WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on the chain -", "INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] +", 
"try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return", "1: #check if we already processed this tx cursor = self.dbCon.cursor() result =", "heightToCheck): #check content of the block for valid transactions block = requests.get(self.node +", "= sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock >", "+ tx['id'] + '\"').fetchall() if len(result) == 0: return True else: self.faultHandler(tx, 'noattachment')", "\"error\") VALUES (\"' + tx['sender'] + '\", \"\", \"\", \"' + tx['id'] +", "+ tx['id'] + '\", \"' + str(amount) + '\", \"tx error, check exception", "\"' + tx['id'] + '\", \"' + str(amount) + '\", \"tx error, check", "pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if", "self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config)", "= cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current", "tn to other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue", "pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT", "+ '\", \"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit()", "on transaction from \" + tx['sender'] + \" - check errors table.\") if", "{ 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid']", "case of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock", "import sqlite3 as sqlite import requests import time import base58 import PyCWaves import", "= requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress", "= transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount", "+ tx['sender'] + \" - check errors table.\") if error == \"senderror\": targetAddress", "targetAddress + '\", \"' + transaction['id'] + '\", \"' + txId.hex() + '\",", "e: self.lastScannedBlock -= 1 print('Something went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e))", "'\", \"\", \"' + tx['id'] + '\", \"' + str(amount) + '\", \"tx", "= \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on the chain - try/except in", "block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\")", "transaction if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']:", "self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1)", "\"no 
attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \" - Error: no attachment", "getCurrentBlock(self): #return current block on the chain - try/except in case of timeouts", "tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there is an attachment", "'\", \"\", \"\", \"' + tx['id'] + '\", \"' + str(amount) + '\",", "+ \" - Error: no attachment found on transaction from \" + tx['sender']", "we already processed this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM", "if len(result) == 0: return True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self,", "+ str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error: on outgoing transaction", "tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId =", "+ '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode()", "-= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0:", "+ tx['sender'] + \" - check errors table.\") if error == \"txerror\": targetAddress", "import PyCWaves import traceback import sharedfunc from web3 import Web3 from verification import", "= requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return CurrentBlock def", "str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress)", "\" - Error: on outgoing transaction for transaction from \" + tx['sender'] +", "check errors table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor()", "'/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return CurrentBlock def run(self): #main routine", "print(timestampStr + \" - Error: no attachment found on transaction from \" +", "self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\",", "at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock()", "self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock", "block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction):", "Error: on outgoing transaction for transaction from \" + tx['sender'] + \" -", "= \"' + tx['id'] + '\"').fetchall() if len(result) == 0: return True else:", "- self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor()", "') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the block for valid", "check errors table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor()", "- Error: on outgoing transaction for transaction from \" + tx['sender'] + \"", "+ '\", \"' + 
str(amount) + '\", \"tx error, possible incorrect address\", \"'", "\"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] + '\", \"' + targetAddress +", "e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice", "-= 1 print('Something went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def", "\"' + str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens", "self.lastScannedBlock -= 1 print('Something went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks'])", "attachment found on transaction from \" + tx['sender'] + \" - check errors", "if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce =", "to other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def", "+ str(amount) + '\", \"tx error, check exception error\", \"' + str(e) +", "' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() -", "print('send tokens from tn to other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction,", "self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"' + tx['id']", "on the chain - try/except in case of timeouts try: CurrentBlock = requests.get(self.node", "signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else:", "== self.config['tn']['assetId']: #check if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress)", "'\")') self.dbCon.commit() print(timestampStr + \" - Error: on outgoing transaction for transaction from", "1 except: CurrentBlock = 0 return CurrentBlock def run(self): #main routine to run", "\"\", \"' + tx['id'] + '\", \"' + str(amount) + '\", \"tx error,", "- check errors table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor =", "'\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn to other", "= \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1 print('Something went wrong", "else: print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals'])", "0: return True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error, e=\"\"):", "sharedfunc from web3 import Web3 from verification import verifier class TNChecker(object): def __init__(self,", "4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there is", "CurrentBlock def run(self): #main routine to run continuesly print('started checking tn blocks at:", "str(amount) + '\", \"tx error, check exception error\", \"' + str(e) + '\")')", "from web3 import Web3 from verification import verifier class TNChecker(object): def __init__(self, config):", "targetAddress = 
base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\",", "sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\",", "= self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice =", "exception error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error:", "+ tx['id'] + '\", \"' + str(amount) + '\", \"tx error, possible incorrect", "= self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals'])", "\"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1 print('Something went wrong during", "\"chain\" = \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1 print('Something went", "Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock =", "self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount", "base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\",", "gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx = {", "= self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"'", "in case of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except:", "self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey'])", "= sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\",", "have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error", "import requests import time import base58 import PyCWaves import traceback import sharedfunc from", "address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error: on", "\"tx error, check exception error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr +", "amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx,", "+ tx['sender'] + '\", \"\", \"\", \"' + tx['id'] + '\", \"' +", "True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock +=", "checkTx(self, tx): #check the transaction if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress']", "= 
base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if we already processed this tx", "self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] >", "> 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx", "to the gateway that have problems amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr", "targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10,", "\"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"' + targetAddress + '\", \"'", "> 1: #check if we already processed this tx cursor = self.dbCon.cursor() result", "found on transaction from \" + tx['sender'] + \" - check errors table.\")", "sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier =", "* 1.1) tx = { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice,", "'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey)", "not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor()", "= self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"' +", "incorrect address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error:", "def checkBlock(self, heightToCheck): #check content of the block for valid transactions block =", "= self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor", "sqlite import requests import time import base58 import PyCWaves import traceback import sharedfunc", "if we already processed this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId", "verification import verifier class TNChecker(object): def __init__(self, config): self.config = config self.dbCon =", "txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" +", "try/except in case of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1", "+ '\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \" - Error:", "- 1 except: CurrentBlock = 0 return CurrentBlock def run(self): #main routine to", "routine to run continuesly print('started checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon", "transaction from \" + tx['sender'] + \" - check errors table.\") if error", "this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId", "+ \" - check errors table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode()", "tx['id'] + '\", \"' + str(amount) + '\", \"tx error, check exception error\",", 
"self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice", "== \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\",", "amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\",", "+ str(amount) + '\", \"tx error, possible incorrect address\", \"' + str(e) +", "transaction\")') self.dbCon.commit() print(timestampStr + \" - Error: no attachment found on transaction from", "possible incorrect address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" -", "\"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *=", "if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors", "print(timestampStr + \" - Error: on outgoing transaction for transaction from \" +", "'\", \"' + str(amount) + '\", \"tx error, check exception error\", \"' +", "/ pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount))", "(\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] + '\", \"\",", "\"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] + '\", \"'", "\"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"' + targetAddress +", "errors table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT", "pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"'", "tx = { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce,", "current block on the chain - try/except in case of timeouts try: CurrentBlock", "import Web3 from verification import verifier class TNChecker(object): def __init__(self, config): self.config =", "str(amount) + '\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \" -", "timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors", "cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\")", "on outgoing transaction for transaction from \" + tx['sender'] + \" - check", "'\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \" - Error: no", "self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock) + ' WHERE \"chain\"", "self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = '", "verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain =", "tx['id'] + '\"').fetchall() if len(result) == 0: return True else: 
self.faultHandler(tx, 'noattachment') return", "self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor =", "#check the transaction if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId']", "height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on", "1 print('Something went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self,", "+ '\", \"\", \"' + tx['id'] + '\", \"' + str(amount) + '\",", "'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if", "def run(self): #main routine to run continuesly print('started checking tn blocks at: '", "of the block for valid transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json()", "Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check the transaction", "True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error, e=\"\"): #handle transfers", "+ str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as e:", "to run continuesly print('started checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon =", "import base58 import PyCWaves import traceback import sharedfunc from web3 import Web3 from", "gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction)", "WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1 print('Something", "checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try:", "e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check the transaction if tx['type']", "+ str(amount) + '\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \"", "targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx", "- check errors table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor =", "block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the block", "'\", \"' + txId.hex() + '\", \"' + str(round(amount)) + '\", \"' +", "not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee']", "(\"' + transaction['sender'] + '\", \"' + targetAddress + '\", \"' + transaction['id']", "self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE", "content of the block for valid transactions block = requests.get(self.node + '/blocks/at/' +", "INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", 
\"amountFee\") VALUES (\"' + transaction['sender'] +", "tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the", "= self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex()))", "+ '\", \"' + str(amount) + '\", \"no attachment found on transaction\")') self.dbCon.commit()", "self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1 print('Something went wrong during tn", "+ str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn to other network!') self.verifier.verifyOther(txId)", "checkBlock(self, heightToCheck): #check content of the block for valid transactions block = requests.get(self.node", "0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice']", "table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO", "try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else:", "chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on the chain - try/except", "' + str(self.lastScannedBlock) + ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as", "\"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\")", "\" - Error: no attachment found on transaction from \" + tx['sender'] +", "run continuesly print('started checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db')", "str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if", "+ '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn to", "error, e=\"\"): #handle transfers to the gateway that have problems amount = tx['amount']", "tx['sender'] + '\", \"\", \"\", \"' + tx['id'] + '\", \"' + str(amount)", "transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if", "self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO", "= self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES", "= self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor", "from \" + tx['sender'] + \" - check errors table.\") if error ==", "block on the chain - try/except in case of timeouts try: CurrentBlock =", "the chain - try/except in case of timeouts try: CurrentBlock = requests.get(self.node +", "is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if 
len(targetAddress) > 1: #check if we", "1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock)", "self.dbCon.commit() print(timestampStr + \" - Error: no attachment found on transaction from \"", "\"' + tx['id'] + '\", \"' + str(amount) + '\", \"tx error, possible", "int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else: try: nonce", "len(result) == 0: return True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx,", "\" + tx['sender'] + \" - check errors table.\") if error == \"senderror\":", "cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0]", "config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'],", "tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\": cursor =", "os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM", "in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction,", "sqlite3 as sqlite import requests import time import base58 import PyCWaves import traceback", "import time import base58 import PyCWaves import traceback import sharedfunc from web3 import", "self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE", "'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId =", "+ targetAddress + '\", \"\", \"' + tx['id'] + '\", \"' + str(amount)", "tnTxId = \"' + tx['id'] + '\"').fetchall() if len(result) == 0: return True", "str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error: on outgoing transaction for", "an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if we already", "+ '\", \"tx error, check exception error\", \"' + str(e) + '\")') self.dbCon.commit()", "no attachment found on transaction from \" + tx['sender'] + \" - check", "<gh_stars>0 import os import sqlite3 as sqlite import requests import time import base58", "targetAddress + '\", \"\", \"' + tx['id'] + '\", \"' + str(amount) +", "except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check the", "self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount", "\"' + str(amount) + '\", \"tx error, check exception error\", \"' + str(e)", "= os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height", 
"if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors", "+ str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress =", "False def faultHandler(self, tx, error, e=\"\"): #handle transfers to the gateway that have", "amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount <", "WHERE tnTxId = \"' + tx['id'] + '\"').fetchall() if len(result) == 0: return", "of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock =", "+ '\", \"\", \"\", \"' + tx['id'] + '\", \"' + str(amount) +", "== \"noattachment\": cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\",", "def faultHandler(self, tx, error, e=\"\"): #handle transfers to the gateway that have problems", "\"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error: on outgoing", "cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\") VALUES (\"' + transaction['sender']", "cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"' + tx['id'] + '\"').fetchall() if", "\"exception\") VALUES (\"' + tx['sender'] + '\", \"' + targetAddress + '\", \"\",", "e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10,", "tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO", "\"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] + '\", \"' +", "self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount", "run(self): #main routine to run continuesly print('started checking tn blocks at: ' +", "os import sqlite3 as sqlite import requests import time import base58 import PyCWaves", "\" - check errors table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor", "VALUES (\"' + tx['sender'] + '\", \"\", \"\", \"' + tx['id'] + '\",", "transaction for transaction from \" + tx['sender'] + \" - check errors table.\")", "block for valid transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction", "\"TN\"').fetchall()[0][0] def getCurrentBlock(self): #return current block on the chain - try/except in case", "timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0", "str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn to other network!') self.verifier.verifyOther(txId) except", "tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while True: try: nextblock", "print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the block for valid transactions", "\" + tx['sender'] + \" - check errors table.\") if error == \"txerror\":", "requests import time import base58 import PyCWaves import traceback import sharedfunc from web3", "targetAddress = 
base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1: #check if we already processed this", "as sqlite import requests import time import base58 import PyCWaves import traceback import", "errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] +", "\"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\",", "TNChecker(object): def __init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node']", "if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else:", "'\", \"tx error, check exception error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr", "== \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\",", "transaction in block['transactions']: if self.checkTx(transaction): targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)):", "print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT", "\"' + transaction['id'] + '\", \"' + txId.hex() + '\", \"' + str(round(amount))", "except Exception as e: self.lastScannedBlock -= 1 print('Something went wrong during tn block", "transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, self.config['erc20']['decimals']) amount =", "amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\", \"ethTxId\", \"amount\", \"amountFee\")", "from verification import verifier class TNChecker(object): def __init__(self, config): self.config = config self.dbCon", "error, possible incorrect address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \"", "self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock) +", "tx['id'] + '\", \"' + str(amount) + '\", \"no attachment found on transaction\")')", "other network!') self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self,", "- Error: no attachment found on transaction from \" + tx['sender'] + \"", "= sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier", "+ ' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -=", "\"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\",", "and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there is an", "= 0 return CurrentBlock def 
run(self): #main routine to run continuesly print('started checking", "' WHERE \"chain\" = \"TN\"') self.dbCon.commit() except Exception as e: self.lastScannedBlock -= 1", "+ '\")') self.dbCon.commit() print('send tokens from tn to other network!') self.verifier.verifyOther(txId) except Exception", "+ '\", \"' + transaction['id'] + '\", \"' + txId.hex() + '\", \"'", "attachment found on transaction\")') self.dbCon.commit() print(timestampStr + \" - Error: no attachment found", "requests.get(self.node + '/blocks/height').json()['height'] - 1 except: CurrentBlock = 0 return CurrentBlock def run(self):", "self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" =", "if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock) cursor = self.dbCon.cursor() cursor.execute('UPDATE heights", "- try/except in case of timeouts try: CurrentBlock = requests.get(self.node + '/blocks/height').json()['height'] -", "tx['sender'] + '\", \"' + targetAddress + '\", \"\", \"' + tx['id'] +", "self.verifier.verifyOther(txId) except Exception as e: self.faultHandler(transaction, \"txerror\", e=e) continue def checkTx(self, tx): #check", "0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'], 'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx =", "else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error, e=\"\"): #handle transfers to", "cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE tnTxId = \"'", "\"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] + '\", \"\", \"\", \"' +", "} signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex())", "str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from tn", "= Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock", "+ '\"').fetchall() if len(result) == 0: return True else: self.faultHandler(tx, 'noattachment') return False", "self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx:", "\" + str(txId.hex())) cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed", "\"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] + '\", \"\", \"\", \"'", "continuesly print('started checking tn blocks at: ' + str(self.lastScannedBlock)) self.dbCon = sqlite.connect('gateway.db') while", "amount = int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under minimum amount') else:", "amount') else: try: nonce = self.w3.eth.getTransactionCount(self.config['erc20']['gatewayAddress']) if self.config['erc20']['gasprice'] > 0: gasprice = self.w3.toWei(self.config['erc20']['gasprice'],", "+ '\", \"' + targetAddress + '\", \"\", \"' + tx['id'] + '\",", "#main routine to run continuesly print('started checking tn 
blocks at: ' + str(self.lastScannedBlock))", "cursor = self.dbCon.cursor() amount /= pow(10, self.config['erc20']['decimals']) cursor.execute('INSERT INTO executed (\"sourceAddress\", \"targetAddress\", \"tnTxId\",", "if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor =", "+ \" - check errors table.\") if error == \"senderror\": targetAddress = base58.b58decode(tx['attachment']).decode()", "iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content of the block for", "#check if there is an attachment targetAddress = base58.b58decode(tx['attachment']).decode() if len(targetAddress) > 1:", "= self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def", "error\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr + \" - Error: on", "\"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender'] + '\", \"\", \"\",", "went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check", "outgoing transaction for transaction from \" + tx['sender'] + \" - check errors", "#return current block on the chain - try/except in case of timeouts try:", "'chainId': self.config['erc20']['chainid'] } signed_tx = self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction,", "cursor = self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES", "self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if there is an attachment targetAddress =", "= self.dbCon.cursor() cursor.execute('UPDATE heights SET \"height\" = ' + str(self.lastScannedBlock) + ' WHERE", "\"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"' + tx['sender'] + '\", \"' + targetAddress", "amount = tx['amount'] / pow(10, self.config['tn']['decimals']) timestampStr = sharedfunc.getnow() if error == \"noattachment\":", "(\"' + tx['sender'] + '\", \"\", \"\", \"' + tx['id'] + '\", \"'", "\"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor = self.dbCon.cursor() amount /=", "self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT", "'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice': gasprice, 'nonce': nonce, 'chainId': self.config['erc20']['chainid'] }", "tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and tx['assetId'] == self.config['tn']['assetId']: #check if", "*= pow(10, self.config['erc20']['decimals']) amount = int(round(amount)) if amount < 0: self.faultHandler(transaction, \"senderror\", e='under", "self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain = \"TN\"').fetchall()[0][0] def getCurrentBlock(self):", "self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount 
-= self.config['erc20']['fee'] amount", "+ transaction['id'] + '\", \"' + txId.hex() + '\", \"' + str(round(amount)) +", "'\")') self.dbCon.commit() print('send tokens from tn to other network!') self.verifier.verifyOther(txId) except Exception as", "CurrentBlock = 0 return CurrentBlock def run(self): #main routine to run continuesly print('started", "\" - check errors table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor", "self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send tx: \" + str(txId.hex())) cursor", "\"amount\", \"amountFee\") VALUES (\"' + transaction['sender'] + '\", \"' + targetAddress + '\",", "== 0: return True else: self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error,", "self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\", \"exception\") VALUES (\"'", "print('Something went wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck):", "for transaction from \" + tx['sender'] + \" - check errors table.\") if", "e=e) continue def checkTx(self, tx): #check the transaction if tx['type'] == 4 and", "processed this tx cursor = self.dbCon.cursor() result = cursor.execute('SELECT ethTxId FROM executed WHERE", "tx['sender'] + \" - check errors table.\") if error == \"txerror\": targetAddress =", "nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock += 1 self.checkBlock(self.lastScannedBlock)", "requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']: if self.checkTx(transaction): targetAddress =", "__init__(self, config): self.config = config self.dbCon = sqlite.connect('gateway.db') self.node = self.config['tn']['node'] self.w3 =", "errors table.\") if error == \"txerror\": targetAddress = base58.b58decode(tx['attachment']).decode() cursor = self.dbCon.cursor() cursor.execute('INSERT", "self.dbCon.cursor() cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' +", "VALUES (\"' + transaction['sender'] + '\", \"' + targetAddress + '\", \"' +", "\"' + tx['id'] + '\", \"' + str(amount) + '\", \"no attachment found", "targetAddress = base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount =", "= self.w3.eth.account.signTransaction(tx, private_key=self.privatekey) txId = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction) if not(str(txId.hex()).startswith('0x')): self.faultHandler(transaction, \"senderror\", e=txId.hex()) else: print(\"send", "\"' + str(amount) + '\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr +", "'\", \"tx error, possible incorrect address\", \"' + str(e) + '\")') self.dbCon.commit() print(timestampStr", "\"txerror\", e=e) continue def checkTx(self, tx): #check the transaction if tx['type'] == 4", "'gwei') else: gasprice = int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress, 'value':", "on transaction\")') self.dbCon.commit() print(timestampStr + \" - 
Error: no attachment found on transaction", "= verifier(config) cursor = self.dbCon.cursor() self.lastScannedBlock = cursor.execute('SELECT height FROM heights WHERE chain", "+ tx['id'] + '\", \"' + str(amount) + '\", \"no attachment found on", "time import base58 import PyCWaves import traceback import sharedfunc from web3 import Web3", "VALUES (\"' + tx['sender'] + '\", \"' + targetAddress + '\", \"\", \"'", "self.dbCon.commit() print(timestampStr + \" - Error: on outgoing transaction for transaction from \"", "found on transaction\")') self.dbCon.commit() print(timestampStr + \" - Error: no attachment found on", "sqlite.connect('gateway.db') while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock:", "\"error\", \"exception\") VALUES (\"' + tx['sender'] + '\", \"' + targetAddress + '\",", "wrong during tn block iteration: ') print(traceback.TracebackException.from_exception(e)) time.sleep(self.config['tn']['timeInBetweenChecks']) def checkBlock(self, heightToCheck): #check content", "import traceback import sharedfunc from web3 import Web3 from verification import verifier class", "faultHandler(self, tx, error, e=\"\"): #handle transfers to the gateway that have problems amount", "base58 import PyCWaves import traceback import sharedfunc from web3 import Web3 from verification", "transaction['sender'] + '\", \"' + targetAddress + '\", \"' + transaction['id'] + '\",", "+ '\")') self.dbCon.commit() print(timestampStr + \" - Error: on outgoing transaction for transaction", "while True: try: nextblock = self.getCurrentBlock() - self.config['tn']['confirmations'] if nextblock > self.lastScannedBlock: self.lastScannedBlock", "self.faultHandler(tx, 'noattachment') return False def faultHandler(self, tx, error, e=\"\"): #handle transfers to the", "valid transactions block = requests.get(self.node + '/blocks/at/' + str(heightToCheck)).json() for transaction in block['transactions']:", "+ transaction['sender'] + '\", \"' + targetAddress + '\", \"' + transaction['id'] +", "import os import sqlite3 as sqlite import requests import time import base58 import", "PyCWaves import traceback import sharedfunc from web3 import Web3 from verification import verifier", "\"\", \"\", \"' + tx['id'] + '\", \"' + str(amount) + '\", \"no", "#check content of the block for valid transactions block = requests.get(self.node + '/blocks/at/'", "+ str(round(amount)) + '\", \"' + str(self.config['erc20']['fee']) + '\")') self.dbCon.commit() print('send tokens from", "'\", \"' + str(amount) + '\", \"no attachment found on transaction\")') self.dbCon.commit() print(timestampStr", "cursor.execute('INSERT INTO errors (\"sourceAddress\", \"targetAddress\", \"ethTxId\", \"tnTxId\", \"amount\", \"error\") VALUES (\"' + tx['sender']", "self.w3 = Web3(Web3.HTTPProvider(self.config['erc20']['node'])) self.privatekey = os.getenv(self.config['erc20']['seedenvname'], self.config['erc20']['privateKey']) self.verifier = verifier(config) cursor = self.dbCon.cursor()", "= base58.b58decode(transaction['attachment']).decode() targetAddress = self.w3.toChecksumAddress(targetAddress) if not(self.w3.isAddress(targetAddress)): self.faultHandler(transaction, \"txerror\") else: amount = transaction['amount']", "tx): #check the transaction if tx['type'] == 4 and tx['recipient'] == self.config['tn']['gatewayAddress'] and", "amount = transaction['amount'] / pow(10, self.config['tn']['decimals']) amount -= self.config['erc20']['fee'] amount *= pow(10, 
self.config['erc20']['decimals'])", "int(self.w3.eth.gasPrice * 1.1) tx = { 'to': targetAddress, 'value': amount, 'gas': self.config['erc20']['gas'], 'gasPrice':" ]
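The INSERT and UPDATE statements above assemble SQL by string concatenation, which breaks on any quote character in attachment or address data and is open to injection. Below is a minimal sketch of the same "executed" bookkeeping using sqlite3 parameter binding; the helper name and the call values are illustrative assumptions, not part of the gateway code.

import sqlite3

# Hypothetical helper, not part of the gateway: record an executed transfer
# using sqlite3 placeholder binding instead of concatenated SQL strings.
# Column names are taken from the INSERT statement in checkBlock above.
def record_executed(db_path, source, target, tn_tx_id, eth_tx_id, amount, amount_fee):
    con = sqlite3.connect(db_path)
    try:
        con.execute(
            'INSERT INTO executed '
            '("sourceAddress", "targetAddress", "tnTxId", "ethTxId", "amount", "amountFee") '
            'VALUES (?, ?, ?, ?, ?, ?)',
            (source, target, tn_tx_id, eth_tx_id, amount, amount_fee),
        )
        con.commit()
    finally:
        con.close()

# Usage sketch with made-up values:
# record_executed('gateway.db', '3JSender', '0xTarget', 'tnTxId', '0xEthTxId', 12.5, 0.1)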
[ "remain in PENDING state until all newly provisioned instances have transitioned to RUNNING", "TERMINATED (in the case where a provider cancels provision). \"\"\" STARTING = \"STARTING\"", "are being re-started after having been stopped. Some instances may already have started,", "Individual instances may be independently transitioned to other states but the requirement is", "the requirement is still considered to be STOPPING until all instances have stopped.", "the requirement are running. Individual instances may be independently transitioned to other states", "started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine instances provisioned for the", "still considered to be STOPPING until all instances have stopped. \"\"\" STOPPED =", "changed and is considered to be in a final state. \"\"\" def __str__(self)", "other states but the requirement is still considered to be running. \"\"\" STOPPING", "considered to be in a final state. \"\"\" def __str__(self) -> str: return", "has been created and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog", "\"STARTING\" \"\"\" The computer machine instances provisioned for the requirement are being re-started", "requirement has been created and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\"", "STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned for the requirement have stopped.\"\"\"", "stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances provisioned for the requirement", "is still considered to be STARTING until all instances have started. \"\"\" RUNNING", "aggregated view of the statuses of compute machine instances provisioned for that requirement.", "state until all newly provisioned instances have transitioned to RUNNING or TERMINATED (in", "machine instances provisioned for the requirement are being stopped. Some instances may already", "be changed and is considered to be in a final state. \"\"\" def", "YellowDog Compute is in the process of provisioning compute machine instances to meet", "the compute requirement may no longer be changed and is considered to be", "be STOPPING until all instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer", "\"PENDING\" \"\"\" YellowDog Compute is in the process of provisioning compute machine instances", "from enum import Enum class ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute", "provisioned for the requirement are running. Individual instances may be independently transitioned to", "still considered to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine", "submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is in the", "considered to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine instances", "after having been stopped. Some instances may already have started, however the requirement", "requirement. The requirement will remain in PENDING state until all newly provisioned instances", "provisioned for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement has been", "point the compute requirement may no longer be changed and is considered to", "created and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is", "may already have started, however the requirement is still considered to be STARTING", "the process of provisioning compute machine instances to meet the requirement. The requirement", "case where a provider cancels provision). 
\"\"\" STARTING = \"STARTING\" \"\"\" The computer", "requirement are being re-started after having been stopped. Some instances may already have", "STARTING until all instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer", "\"NEW\" \"\"\"The compute requirement has been created and submitted to YellowDog Compute.\"\"\" PENDING", "computer machine instances provisioned for the requirement have been terminated. At this point", "\"RUNNING\" \"\"\" The computer machine instances provisioned for the requirement are running. Individual", "the requirement is still considered to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\"", "instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances provisioned", "provisioning compute machine instances to meet the requirement. The requirement will remain in", "= \"PENDING\" \"\"\" YellowDog Compute is in the process of provisioning compute machine", "an aggregated view of the statuses of compute machine instances provisioned for that", "The computer machine instances provisioned for the requirement are running. Individual instances may", "being re-started after having been stopped. Some instances may already have started, however", "terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances provisioned for the", "of provisioning compute machine instances to meet the requirement. The requirement will remain", "\"\"\" The computer machine instances provisioned for the requirement have been terminated. At", "= \"NEW\" \"\"\"The compute requirement has been created and submitted to YellowDog Compute.\"\"\"", "terminated. Some instances may already be terminated, however the requirement is still considered", "\"\"\"The computer machine instances provisioned for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\"", "of a compute requirement provides an aggregated view of the statuses of compute", "but the requirement is still considered to be running. \"\"\" STOPPING = \"STOPPING\"", "provisioned for the requirement are being re-started after having been stopped. Some instances", "all instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine instances", "requirement are being terminated. Some instances may already be terminated, however the requirement", "machine instances to meet the requirement. The requirement will remain in PENDING state", "requirement may no longer be changed and is considered to be in a", "for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances", "= \"TERMINATING\" \"\"\" The computer machine instances provisioned for the requirement are being", "until all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine", "meet the requirement. The requirement will remain in PENDING state until all newly", "no longer be changed and is considered to be in a final state.", "to be STARTING until all instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\"", "instances may already be terminated, however the requirement is still considered to be", "already have stopped, however the requirement is still considered to be STOPPING until", "compute requirement. The status of a compute requirement provides an aggregated view of", "ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute requirement. The status of a", "computer machine instances provisioned for the requirement are being terminated. 
Some instances may", "all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances", "and is considered to be in a final state. \"\"\" def __str__(self) ->", "running. Individual instances may be independently transitioned to other states but the requirement", "= \"TERMINATED\" \"\"\" The computer machine instances provisioned for the requirement have been", "The status of a compute requirement provides an aggregated view of the statuses", "been stopped. Some instances may already have started, however the requirement is still", "provisioned for the requirement have been terminated. At this point the compute requirement", "until all instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances", "stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned for the requirement", "compute requirement has been created and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\"", "= \"STOPPED\" \"\"\"The computer machine instances provisioned for the requirement have stopped.\"\"\" TERMINATING", "TERMINATING until all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer", "process of provisioning compute machine instances to meet the requirement. The requirement will", "\"\"\" The computer machine instances provisioned for the requirement are being terminated. Some", "\"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine instances provisioned for the requirement", "considered to be TERMINATING until all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\"", "computer machine instances provisioned for the requirement are being re-started after having been", "\"\"\" The computer machine instances provisioned for the requirement are running. Individual instances", "machine instances provisioned for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The", "\"STOPPING\" \"\"\" The computer machine instances provisioned for the requirement are being stopped.", "considered to be STARTING until all instances have started. \"\"\" RUNNING = \"RUNNING\"", "Some instances may already have stopped, however the requirement is still considered to", "or TERMINATED (in the case where a provider cancels provision). \"\"\" STARTING =", "\"STOPPED\" \"\"\"The computer machine instances provisioned for the requirement have stopped.\"\"\" TERMINATING =", "the requirement are being re-started after having been stopped. Some instances may already", "the requirement are being terminated. Some instances may already be terminated, however the", "to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is in the process", "provides an aggregated view of the statuses of compute machine instances provisioned for", "instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned for", "transitioned to other states but the requirement is still considered to be running.", "until all instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine", "newly provisioned instances have transitioned to RUNNING or TERMINATED (in the case where", "may be independently transitioned to other states but the requirement is still considered", "the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances provisioned", "(in the case where a provider cancels provision). 
\"\"\" STARTING = \"STARTING\" \"\"\"", "NEW = \"NEW\" \"\"\"The compute requirement has been created and submitted to YellowDog", "transitioned to RUNNING or TERMINATED (in the case where a provider cancels provision).", "import Enum class ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute requirement. The", "of a compute requirement. The status of a compute requirement provides an aggregated", "the statuses of compute machine instances provisioned for that requirement. \"\"\" NEW =", "in PENDING state until all newly provisioned instances have transitioned to RUNNING or", "STOPPING until all instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine", "is considered to be in a final state. \"\"\" def __str__(self) -> str:", "have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned for the", "that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement has been created and", "be TERMINATING until all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The", "Compute is in the process of provisioning compute machine instances to meet the", "statuses of compute machine instances provisioned for that requirement. \"\"\" NEW = \"NEW\"", "stopped. Some instances may already have started, however the requirement is still considered", "being terminated. Some instances may already be terminated, however the requirement is still", "\"\"\" NEW = \"NEW\" \"\"\"The compute requirement has been created and submitted to", "= \"STOPPING\" \"\"\" The computer machine instances provisioned for the requirement are being", "\"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned for the requirement have", "states but the requirement is still considered to be running. \"\"\" STOPPING =", "be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine instances provisioned for", "requirement is still considered to be STOPPING until all instances have stopped. \"\"\"", "are being terminated. Some instances may already be terminated, however the requirement is", "been terminated. At this point the compute requirement may no longer be changed", "have stopped, however the requirement is still considered to be STOPPING until all", "is still considered to be TERMINATING until all instances have terminated. \"\"\" TERMINATED", "At this point the compute requirement may no longer be changed and is", "the status of a compute requirement. The status of a compute requirement provides", "for the requirement are being terminated. Some instances may already be terminated, however", "instances provisioned for the requirement are being terminated. Some instances may already be", "requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances provisioned for", "provider cancels provision). \"\"\" STARTING = \"STARTING\" \"\"\" The computer machine instances provisioned", "terminated, however the requirement is still considered to be TERMINATING until all instances", "however the requirement is still considered to be TERMINATING until all instances have", "and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is in", "machine instances provisioned for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement", "for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement has been created", "requirement are being stopped. 
Some instances may already have stopped, however the requirement", "The computer machine instances provisioned for the requirement are being re-started after having", "cancels provision). \"\"\" STARTING = \"STARTING\" \"\"\" The computer machine instances provisioned for", "still considered to be STARTING until all instances have started. \"\"\" RUNNING =", "instances may already have stopped, however the requirement is still considered to be", "instances provisioned for the requirement are being re-started after having been stopped. Some", "instances provisioned for the requirement are being stopped. Some instances may already have", "The computer machine instances provisioned for the requirement have been terminated. At this", "been created and submitted to YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute", "to be TERMINATING until all instances have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\"", "all instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The computer machine instances provisioned", "compute requirement may no longer be changed and is considered to be in", "instances have transitioned to RUNNING or TERMINATED (in the case where a provider", "already have started, however the requirement is still considered to be STARTING until", "may already have stopped, however the requirement is still considered to be STOPPING", "stopped, however the requirement is still considered to be STOPPING until all instances", "compute machine instances provisioned for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute", "is in the process of provisioning compute machine instances to meet the requirement.", "instances provisioned for the requirement are running. Individual instances may be independently transitioned", "requirement. The status of a compute requirement provides an aggregated view of the", "having been stopped. Some instances may already have started, however the requirement is", "\"\"\" YellowDog Compute is in the process of provisioning compute machine instances to", "to meet the requirement. The requirement will remain in PENDING state until all", "RUNNING or TERMINATED (in the case where a provider cancels provision). \"\"\" STARTING", "provisioned for the requirement are being stopped. Some instances may already have stopped,", "machine instances provisioned for the requirement have been terminated. At this point the", "computer machine instances provisioned for the requirement are running. Individual instances may be", "requirement provides an aggregated view of the statuses of compute machine instances provisioned", "a provider cancels provision). \"\"\" STARTING = \"STARTING\" \"\"\" The computer machine instances", "for the requirement are running. Individual instances may be independently transitioned to other", "\"\"\" The computer machine instances provisioned for the requirement are being re-started after", "instances provisioned for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer", "are running. Individual instances may be independently transitioned to other states but the", "independently transitioned to other states but the requirement is still considered to be", "provisioned instances have transitioned to RUNNING or TERMINATED (in the case where a", "Enum class ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute requirement. 
The status", "instances may be independently transitioned to other states but the requirement is still", "to other states but the requirement is still considered to be running. \"\"\"", "The computer machine instances provisioned for the requirement are being stopped. Some instances", "the requirement are being stopped. Some instances may already have stopped, however the", "PENDING = \"PENDING\" \"\"\" YellowDog Compute is in the process of provisioning compute", "in the process of provisioning compute machine instances to meet the requirement. The", "however the requirement is still considered to be STOPPING until all instances have", "the requirement is still considered to be TERMINATING until all instances have terminated.", "have terminated. \"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances provisioned for", "TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances provisioned for the requirement have", "view of the statuses of compute machine instances provisioned for that requirement. \"\"\"", "to RUNNING or TERMINATED (in the case where a provider cancels provision). \"\"\"", "is still considered to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer", "\"\"\" TERMINATED = \"TERMINATED\" \"\"\" The computer machine instances provisioned for the requirement", "compute machine instances to meet the requirement. The requirement will remain in PENDING", "a compute requirement. The status of a compute requirement provides an aggregated view", "requirement are running. Individual instances may be independently transitioned to other states but", "be terminated, however the requirement is still considered to be TERMINATING until all", "\"\"\" The computer machine instances provisioned for the requirement are being stopped. Some", "provisioned for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine", "requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement has been created and submitted", "all newly provisioned instances have transitioned to RUNNING or TERMINATED (in the case", "\"\"\"The compute requirement has been created and submitted to YellowDog Compute.\"\"\" PENDING =", "to be STOPPING until all instances have stopped. \"\"\" STOPPED = \"STOPPED\" \"\"\"The", "be independently transitioned to other states but the requirement is still considered to", "this point the compute requirement may no longer be changed and is considered", "started, however the requirement is still considered to be STARTING until all instances", "a compute requirement provides an aggregated view of the statuses of compute machine", "requirement have been terminated. At this point the compute requirement may no longer", "instances provisioned for the requirement have been terminated. At this point the compute", "Some instances may already be terminated, however the requirement is still considered to", "YellowDog Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is in the process of", "to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine instances provisioned", "machine instances provisioned for the requirement are being re-started after having been stopped.", "have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances provisioned for the", "instances provisioned for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The compute requirement has", "still considered to be TERMINATING until all instances have terminated. \"\"\" TERMINATED =", "have been terminated. 
At this point the compute requirement may no longer be", "\"\"\" Describes the status of a compute requirement. The status of a compute", "of compute machine instances provisioned for that requirement. \"\"\" NEW = \"NEW\" \"\"\"The", "the case where a provider cancels provision). \"\"\" STARTING = \"STARTING\" \"\"\" The", "\"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine instances provisioned for the requirement", "being stopped. Some instances may already have stopped, however the requirement is still", "\"TERMINATING\" \"\"\" The computer machine instances provisioned for the requirement are being terminated.", "may already be terminated, however the requirement is still considered to be TERMINATING", "already be terminated, however the requirement is still considered to be TERMINATING until", "are being stopped. Some instances may already have stopped, however the requirement is", "class ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute requirement. The status of", "running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The computer machine instances provisioned for the", "stopped. Some instances may already have stopped, however the requirement is still considered", "have started, however the requirement is still considered to be STARTING until all", "may no longer be changed and is considered to be in a final", "machine instances provisioned for the requirement are running. Individual instances may be independently", "longer be changed and is considered to be in a final state. \"\"\"", "RUNNING = \"RUNNING\" \"\"\" The computer machine instances provisioned for the requirement are", "for the requirement are being re-started after having been stopped. Some instances may", "Some instances may already have started, however the requirement is still considered to", "STOPPING = \"STOPPING\" \"\"\" The computer machine instances provisioned for the requirement are", "of the statuses of compute machine instances provisioned for that requirement. \"\"\" NEW", "will remain in PENDING state until all newly provisioned instances have transitioned to", "however the requirement is still considered to be STARTING until all instances have", "requirement is still considered to be running. \"\"\" STOPPING = \"STOPPING\" \"\"\" The", "the requirement is still considered to be STARTING until all instances have started.", "enum import Enum class ComputeRequirementStatus(Enum): \"\"\" Describes the status of a compute requirement.", "considered to be STOPPING until all instances have stopped. \"\"\" STOPPED = \"STOPPED\"", "PENDING state until all newly provisioned instances have transitioned to RUNNING or TERMINATED", "terminated. At this point the compute requirement may no longer be changed and", "requirement is still considered to be STARTING until all instances have started. \"\"\"", "requirement is still considered to be TERMINATING until all instances have terminated. \"\"\"", "STARTING = \"STARTING\" \"\"\" The computer machine instances provisioned for the requirement are", "for the requirement have been terminated. At this point the compute requirement may", "\"\"\" STARTING = \"STARTING\" \"\"\" The computer machine instances provisioned for the requirement", "TERMINATING = \"TERMINATING\" \"\"\" The computer machine instances provisioned for the requirement are", "have transitioned to RUNNING or TERMINATED (in the case where a provider cancels", "Describes the status of a compute requirement. 
The status of a compute requirement", "\"TERMINATED\" \"\"\" The computer machine instances provisioned for the requirement have been terminated.", "re-started after having been stopped. Some instances may already have started, however the", "= \"RUNNING\" \"\"\" The computer machine instances provisioned for the requirement are running.", "computer machine instances provisioned for the requirement have stopped.\"\"\" TERMINATING = \"TERMINATING\" \"\"\"", "be STARTING until all instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The", "until all newly provisioned instances have transitioned to RUNNING or TERMINATED (in the", "requirement will remain in PENDING state until all newly provisioned instances have transitioned", "status of a compute requirement provides an aggregated view of the statuses of", "have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine instances provisioned for", "The computer machine instances provisioned for the requirement are being terminated. Some instances", "for the requirement are being stopped. Some instances may already have stopped, however", "provisioned for the requirement are being terminated. Some instances may already be terminated,", "compute requirement provides an aggregated view of the statuses of compute machine instances", "is still considered to be STOPPING until all instances have stopped. \"\"\" STOPPED", "instances have started. \"\"\" RUNNING = \"RUNNING\" \"\"\" The computer machine instances provisioned", "where a provider cancels provision). \"\"\" STARTING = \"STARTING\" \"\"\" The computer machine", "= \"STARTING\" \"\"\" The computer machine instances provisioned for the requirement are being", "machine instances provisioned for the requirement are being terminated. Some instances may already", "the requirement have been terminated. At this point the compute requirement may no", "The requirement will remain in PENDING state until all newly provisioned instances have", "Compute.\"\"\" PENDING = \"PENDING\" \"\"\" YellowDog Compute is in the process of provisioning", "provision). \"\"\" STARTING = \"STARTING\" \"\"\" The computer machine instances provisioned for the", "instances may already have started, however the requirement is still considered to be", "computer machine instances provisioned for the requirement are being stopped. Some instances may", "instances to meet the requirement. The requirement will remain in PENDING state until", "the requirement. The requirement will remain in PENDING state until all newly provisioned", "to be in a final state. \"\"\" def __str__(self) -> str: return self.name", "status of a compute requirement. The status of a compute requirement provides an" ]
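A minimal sketch of how this enum might be consumed, for example polling a requirement until it reaches its final state. The get_status callable is hypothetical — the enum above only defines the statuses, not how they are fetched:

import time

def wait_until_terminated(get_status, poll_seconds=10):
    # get_status is a hypothetical zero-argument callable returning a
    # ComputeRequirementStatus; TERMINATED is the only state documented
    # above as final.
    while True:
        status = get_status()
        print("compute requirement status:", status)  # __str__ prints the name
        if status is ComputeRequirementStatus.TERMINATED:
            return status
        time.sleep(poll_seconds)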
[ "c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE:", "from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli as c logger =", "import logging import unittest from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli", "bitfinex_algo import cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls):", "c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i", "[{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100,", "class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml'))", "def test_validate_config(self): for i in range(2, 5): with self.subTest(i=i): config = load_config(f'tests/config/invalid_config_{i}.yaml') self.assertIsNone(validate_config(config))", "as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def", "3, } ) def test_validate_config(self): for i in range(2, 5): with self.subTest(i=i): config", "def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE:", "ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual(", "{ c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, {", "logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET)", "self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2", "import unittest from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli as c", "{ c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, }", "c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self):", "<gh_stars>0 import logging import unittest from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import", "cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod", "@classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE:", "setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): 
self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS:", "2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY:", "bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli as c logger = logging.getLogger('bitfinex')", "unittest from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli as c logger", "validate_config from bitfinex_algo import cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod", "validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 },", "logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100,", "100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i in", "logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self):", "c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } )", "c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i in range(2,", "1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i in range(2, 5):", "} ) def test_validate_config(self): for i in range(2, 5): with self.subTest(i=i): config =", "test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100,", ") def test_validate_config(self): for i in range(2, 5): with self.subTest(i=i): config = load_config(f'tests/config/invalid_config_{i}.yaml')", "tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE:", "@classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')),", "95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105,", "def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), {", "100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100,", "c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls):", "c.LEVELS: [{ 
c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE:", "c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE:", "c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i in range(2, 5): with self.subTest(i=i):", "def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95,", "load_config, validate_config from bitfinex_algo import cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase):", "import cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL)", "c.ORDER_SIZE: 100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT:", "c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }],", "100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def", "from bitfinex_algo import cli as c logger = logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def", "105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for", "import load_config, validate_config from bitfinex_algo import cli as c logger = logging.getLogger('bitfinex') class", "}], c.UPDATE_FREQUENCY: 3, } ) def test_validate_config(self): for i in range(2, 5): with", "logging import unittest from bitfinex_algo.cli import load_config, validate_config from bitfinex_algo import cli as", "logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def test_load_config(self): self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{", "100, c.ORDER_COUNT: 2 }, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1", "self.assertIsNone(load_config('tests/config/invalid_config_1.yaml')) self.assertDictEqual( validate_config(load_config('tests/config/valid_config_5.yaml')), { c.LEVELS: [{ c.BUY_PRICE: 95, c.SELL_PRICE: 100, c.ORDER_SIZE: 100, c.ORDER_COUNT:", "}, { c.BUY_PRICE: 100, c.SELL_PRICE: 105, c.ORDER_SIZE: 100, c.ORDER_COUNT: 1 }], c.UPDATE_FREQUENCY: 3,", "= logging.getLogger('bitfinex') class ConfigTests(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) @classmethod def tearDownClass(cls): logging.disable(logging.NOTSET) def" ]
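For orientation, the valid fixture presumably holds the YAML equivalent of the dict asserted above. A plausible sketch of tests/config/valid_config_5.yaml, assuming the c.LEVELS, c.BUY_PRICE, etc. constants resolve to the lowercase key names shown here — the actual keys are defined in bitfinex_algo.cli, so these names are an assumption:

# Hypothetical contents of tests/config/valid_config_5.yaml; key names assumed.
levels:
  - buy_price: 95
    sell_price: 100
    order_size: 100
    order_count: 2
  - buy_price: 100
    sell_price: 105
    order_size: 100
    order_count: 1
update_frequency: 3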
[ "\"1\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20}", "Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\": query =", "\"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City + \"','\" + Code", "param1=Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() ==", "\"4\": print(\"show cars by engine size\") print(\"------------------------\") while True: csize = input(\"Enter Car", "query = \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1)", "\"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p in car: print ('{0}", "dfp,df if (choice == \"6\" or choice == \"7\") and dfp != \"2\"", "pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb =", "df = pd.read_sql_query(query, connection) globalSet() if choice == \"1\" : cursor.execute(query) names =", "| {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT *****", "input(\"Enter City Name :\") Code= input(\"Country Code :\") district= input(\"District :\") pop= input(\"Population", "City + \"','\" + Code + \"','\" + district + \"',\"+ str(pop)+\")\" DBconnection", ": print (\"******Error Occurred while executing Mongo commands******\") def globalSet (): global dfp", "{:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} |", "= connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or choice == \"7\") and", "Engine Size\") print(\"5 - Add New Car\") print(\"6 - View Countries by name\")", "\"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <, > or", "Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() ==", "Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from country\" while True: Comparison", "param1 == \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\":", "\"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car =", "input(\"Enter Car Engine Size :\") if csize.isdigit() == True: csize = csize break", "== \"x\": print(\"Bye - Program Terminate now and welcome back anytime!\") return elif", "from tabulate import tabulate # This function will display a Menu as requested", "'.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query", "by population\") print(\"3 - Add New City\") print(\"4 - Find Car by Engine", "global dfp,df if (choice == \"6\" or choice == \"7\") and dfp !=", "def globalSet (): global dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection =", "- View Cities by population\") print(\"3 - Add New City\") 
print(\"4 - Find", "True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add", "city where population\" + Comparison break else: displaymenu() while True: Value= input(\"Enter Population", "password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno == 1452:", "break else: displaymenu() DBconnection (query, choice, Code,param1) elif choice == \"4\": print(\"show cars", "<, > or = :\") if Comparison == \"<\" or Comparison == \">\"", "= {\"_id\":int(id)} car = docs.find(query) for p in car: print (p) except :", "= \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif", "Engine Size :\") if csize.isdigit() == True: csize = csize break else: displaymenu()", "DBconnection (query, choice, Code,param1) elif choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\") query", "\"=\": query = \"select * from city where population\" + Comparison break else:", "print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size", "- View 15 Cities\") print(\"2 - View Cities by population\") print(\"3 - Add", "View Cities by population\") print(\"3 - Add New City\") print(\"4 - Find Car", "\"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 =", "with your user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not", "=pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if choice ==", ": cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} |", "pd.read_sql_query(query, connection) globalSet() if choice == \"1\" : cursor.execute(query) names = list(map(lambda x:", "print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2 - View", "connection.close() def displaymenu(): print(\"This is not a valid choice. 
You can only choose", "as pd #Mongo modules import import pymongo from pymongo import MongoClient #Pandas printing", "print(\"Cities by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <, > or =", "displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: query =", "from country\" while True: Comparison = input(\"Enter <, > or = :\") if", "== \"4\": print(\"show cars by engine size\") print(\"------------------------\") while True: csize = input(\"Enter", "\"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter", "choice,Code,param1) elif choice == \"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name", "Menu as requested in the project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\")", "the table\") elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1,", "p in car: print (p) except : print (\"******Error Occurred while executing Mongo", "{:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" : cursor.execute(query)", "displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg=", "# This function will display a Menu as requested in the project specification", "\"\",\"\" if choice == \"x\": print(\"Bye - Program Terminate now and welcome back", "+ \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database:", "choice, Code,param1) elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter", "***** The new city record is inserted into the table\") elif choice ==", "input(\"Enter Country Name :\") query = \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname", "else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: Code", "to connect to the database: {}\".format(error)) connection.rollback() finally: #closing database connection. 
if(connection.is_connected()): connection.close()", "* from city limit 15\" DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities", "=\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017)", "str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add New", "= input(\"Choice : --> \") Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye", "global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"]", "pandas as pd #Mongo modules import import pymongo from pymongo import MongoClient #Pandas", "from pymongo import MongoClient #Pandas printing module from tabulate import tabulate # This", "break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add New City\")", "(name, countrycode,district,population) VALUES ('\" + City + \"','\" + Code + \"','\" +", "elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name", "tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 == \">\": df1 =", "].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1", "print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is", "Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter", "or Comparison == \">\" or Comparison == \"=\": param1=Comparison break else: displaymenu() while", "{:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\"", ":\") if Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\":", "import errorcode import pandas as pd #Mongo modules import import pymongo from pymongo", "> or = :\") if Comparison == \"<\" or Comparison == \">\" or", "Population :\") if Value.isdigit() == True: query = query + str(Value) break else:", "DBconnection (query, choice, Code,param1) elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname", "city (name, countrycode,district,population) VALUES ('\" + City + \"','\" + Code + \"','\"", "Python program answers # Author : Somu #mySQL modules import import mysql.connector from", "import pandas as pd #Mongo modules import import pymongo from pymongo import MongoClient", "- Find Car by Engine Size\") print(\"5 - Add New Car\") print(\"6 -", "Applied Database # Final Project # Section 4.4 - Python program answers #", "df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"] == int(code))", "is not a valid choice. 
You can only choose from the menu.\") input(\"\\nPress", "for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12} |", "= \"select * from city where population\" + Comparison break else: displaymenu() while", "\"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for", "continue...\") def main(): while True: menu() choice = input(\"Choice : --> \") Code,param1", "or Comparison == \"=\": query = \"select * from city where population\" +", "choice == \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\")", "= :\") if Comparison == \"<\" or Comparison == \">\" or Comparison ==", "= df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"] <", "print(\"Bye - Program Terminate now and welcome back anytime!\") return elif choice ==", "(\"******Error Occurred while executing Mongo commands******\") def globalSet (): global dfp dfp =", "if csize.isdigit() == True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice", "param1 == \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\":", "or = :\") if Comparison == \"<\" or Comparison == \">\" or Comparison", "choice == \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p in car:", "(choice == \"6\" or choice == \"7\") and dfp != \"2\" : df", "View 15 Cities\") print(\"2 - View Cities by population\") print(\"3 - Add New", "pop= input(\"Population :\") query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\" +", "new city record is inserted into the table\") elif choice == \"6\" :", "by Engine Size\") print(\"5 - Add New Car\") print(\"6 - View Countries by", "if choice == \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p in", "#closing database connection. 
if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a valid choice.", "- View Countries by name\") print(\"7 - View Countries by population\") print(\"x -", "module from tabulate import tabulate # This function will display a Menu as", "choice == \"1\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5}", "elif choice == \"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name :\")", "+ \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice == \"6\": print(\"Countries by", "choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\")", "\"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\": query", "== \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size=", "print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 == \">\": df1 = df[(df[\"population\"]", "cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city record is inserted into the", "myclient = None global dfp, df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size):", "and dfp != \"2\" : df = pd.read_sql_query(query, connection) globalSet() if choice ==", "in car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\":", "= list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20}", "== \"1\": query= \"select * from city limit 15\" DBconnection (query, choice,Code,param1) elif", "{\"car.engineSize\":float(csize)} car = docs.find(query) for p in car: print ('{0} | {1} |", "car: print (p) except : print (\"******Error Occurred while executing Mongo commands******\") def", "True: Comparison = input(\"Enter <, > or = :\") if Comparison == \"<\"", "== \">\" or Comparison == \"=\": query = \"select * from city where", "countrycode, district,population)) elif choice == \"2\" : cursor.execute(query) names = list(map(lambda x: x[0],", "{:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} |", "Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query = \"select code, Name,", "== \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p in car: print", "requested in the project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\")", "return elif choice == \"1\": query= \"select * from city limit 15\" DBconnection", "{:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor:", "City\") print(\"4 - Find Car by Engine Size\") print(\"5 - Add New Car\")", "The new city record is inserted into the table\") elif choice == \"6\"", "main(): while True: menu() choice = input(\"Choice : --> \") Code,param1 = \"\",\"\"", "Size 
:\") if csize.isdigit() == True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\")", "error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***:", "= \"\",\"\" if choice == \"x\": print(\"Bye - Program Terminate now and welcome", "New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size :\")", "{ \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for p", "password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or choice ==", "DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2 - View Cities", "\"7\": if param1 == \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1", "Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye - Program Terminate now and", "True: Code = Value break else: displaymenu() DBconnection (query, choice, Code,param1) elif choice", "in the project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1", "error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name or password\") elif", "Name :\") Code= input(\"Country Code :\") district= input(\"District :\") pop= input(\"Population :\") query", "= input(\"Enter Country Name :\") query = \"select code, Name, Continent,population,HeadofState from country\"", "Code,param1) elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country", "+ City + \"','\" + Code + \"','\" + district + \"',\"+ str(pop)+\")\"", "Cities by population\") print(\"3 - Add New City\") print(\"4 - Find Car by", "{:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit", "\"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from country\"", "= \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\":", "can only choose from the menu.\") input(\"\\nPress enter to continue...\") if __name__ ==", "print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from country\" while True: Comparison =", "mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p", ": if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name or", "Comparison == \"=\": param1=Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\")", "connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a valid choice. 
You can", "True: query = query + str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif", "by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query = \"select code,", "else: displaymenu() DBconnection (query, choice, Code,param1) elif choice == \"4\": print(\"show cars by", "global dfp, df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient", "your user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\")", "\"<\" or Comparison == \">\" or Comparison == \"=\": param1=Comparison break else: displaymenu()", "elif choice == \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg", "if param1 == \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 ==", "print(\"Something is wrong with your user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR:", "elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code + \" does", "choice = input(\"Choice : --> \") Code,param1 = \"\",\"\" if choice == \"x\":", "Somu #mySQL modules import import mysql.connector from mysql.connector import Error from mysql.connector import", "else: print(\"Failed to connect to the database: {}\".format(error)) connection.rollback() finally: #closing database connection.", "print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2 - View Cities by population\")", "from mysql.connector import errorcode import pandas as pd #Mongo modules import import pymongo", "error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name", "VALUES ('\" + City + \"','\" + Code + \"','\" + district +", "csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New", "Occurred while executing Mongo commands******\") def globalSet (): global dfp dfp = \"2\"", "\"2\" : df = pd.read_sql_query(query, connection) globalSet() if choice == \"1\" : cursor.execute(query)", "population\" + Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if", "elif param1 == \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 ==", "global dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>')", "choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison =", "- Add New City\") print(\"4 - Find Car by Engine Size\") print(\"5 -", "docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for p in car: print (p)", "\">\" or Comparison == \"=\": param1=Comparison break else: displaymenu() while True: Value= input(\"Enter", "while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: query = query", "cursor: print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif", "- Python program answers # Author : Somu #mySQL modules import import mysql.connector", "City\") print(\"------------\") City= input(\"Enter City Name :\") Code= input(\"Country Code :\") district= input(\"District", "print(\"------------\") City= input(\"Enter City Name :\") Code= 
input(\"Country Code :\") district= input(\"District :\")", "country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice == \"7\": print(\"Countries by Population\")", "True: menu() choice = input(\"Choice : --> \") Code,param1 = \"\",\"\" if choice", "= docs.find(query) for p in car: print (p) except : print (\"******Error Occurred", "New Car\") print(\"6 - View Countries by name\") print(\"7 - View Countries by", "print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name :\") Code= input(\"Country Code :\")", "does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database: {}\".format(error)) connection.rollback()", "if choice == \"x\": print(\"Bye - Program Terminate now and welcome back anytime!\")", "DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if", "{:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\":", ": Somu #mySQL modules import import mysql.connector from mysql.connector import Error from mysql.connector", "choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)}", "df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster')", "{1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x", "| {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5}", "from city limit 15\" DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities by", "\"7\") and dfp != \"2\" : df = pd.read_sql_query(query, connection) globalSet() if choice", "Final Project # Section 4.4 - Python program answers # Author : Somu", "= pd.read_sql_query(query, connection) globalSet() if choice == \"1\" : cursor.execute(query) names = list(map(lambda", "engine size\") print(\"------------------------\") while True: csize = input(\"Enter Car Engine Size :\") if", "Author : Somu #mySQL modules import import mysql.connector from mysql.connector import Error from", "= \"select code, Name, Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter <,", "cars by engine size\") print(\"------------------------\") while True: csize = input(\"Enter Car Engine Size", "\"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\":", "Value.isdigit() == True: Code = Value break else: displaymenu() DBconnection (query, choice, Code,param1)", "by engine size\") print(\"------------------------\") while True: csize = input(\"Enter Car Engine Size :\")", "== \"<\" or Comparison == \">\" or Comparison == \"=\": query = \"select", "== True: query = query + str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1)", "mysql.connector from mysql.connector import Error from 
mysql.connector import errorcode import pandas as pd", "!= \"2\" : df = pd.read_sql_query(query, connection) globalSet() if choice == \"1\" :", "== \"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name :\") Code= input(\"Country", "| {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in", "{}\".format(error)) connection.rollback() finally: #closing database connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not", "def main(): while True: menu() choice = input(\"Choice : --> \") Code,param1 =", "print(\"3 - Add New City\") print(\"4 - Find Car by Engine Size\") print(\"5", "car = docs.find(query) for p in car: print ('{0} | {1} | {2}", "print(\"This is not a valid choice. You can only choose from the above", "if choice == \"1\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\")", "in cursor: print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population))", ":\") if Value.isdigit() == True: query = query + str(Value) break else: displaymenu()", "reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid", "for p in car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice", "\"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query =", "This function will display a Menu as requested in the project specification def", "modules import import mysql.connector from mysql.connector import Error from mysql.connector import errorcode import", "print(\"4 - Find Car by Engine Size\") print(\"5 - Add New Car\") print(\"6", "size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice. You", "DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter", "+ Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit()", "Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs", "df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"] < int(code))", "{:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT", "(p) except : print (\"******Error Occurred while executing Mongo commands******\") def globalSet ():", "print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name,", "Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice. You can only choose from", "a valid choice. 
You can only choose from the menu.\") input(\"\\nPress enter to", "== \"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from", "== True: Code = Value break else: displaymenu() DBconnection (query, choice, Code,param1) elif", "print(\"2 - View Cities by population\") print(\"3 - Add New City\") print(\"4 -", "print(\"7 - View Countries by population\") print(\"x - Exit application\") myclient = None", "def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs']", ":\") district= input(\"District :\") pop= input(\"Population :\") query = \"Insert INTO city (name,", "choice == \"4\": print(\"show cars by engine size\") print(\"------------------------\") while True: csize =", "if Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\": param1=Comparison", "choice == \"7\") and dfp != \"2\" : df = pd.read_sql_query(query, connection) globalSet()", "options\") input(\"\\nPress enter to continue...\") def main(): while True: menu() choice = input(\"Choice", "Population :\") if Value.isdigit() == True: Code = Value break else: displaymenu() DBconnection", "is inserted into the table\") elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]", "= myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)} car", "print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\":", "menu() choice = input(\"Choice : --> \") Code,param1 = \"\",\"\" if choice ==", "back anytime!\") return elif choice == \"1\": query= \"select * from city limit", "by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <, > or = :\")", "else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: query", "input(\"Population :\") query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City", "+ \"','\" + district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice", "where population\" + Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\")", "def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df", "print (p) except : print (\"******Error Occurred while executing Mongo commands******\") def globalSet", "input(\"Enter <, > or = :\") if Comparison == \"<\" or Comparison ==", "choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState", "elif choice == \"2\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\")", "\"x\": print(\"Bye - Program Terminate now and welcome back anytime!\") return elif choice", "try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice", "choice == \"x\": print(\"Bye - Program Terminate now and welcome back anytime!\") return", "\"','\" + Code + \"','\" + district + \"',\"+ 
str(pop)+\")\" DBconnection (query, choice,", "print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12}", "pymongo from pymongo import MongoClient #Pandas printing module from tabulate import tabulate #", "View Countries by name\") print(\"7 - View Countries by population\") print(\"x - Exit", "- Add New Car\") print(\"6 - View Countries by name\") print(\"7 - View", "= mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice == \"6\"", "| {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" : cursor.execute(query) names", "into the table\") elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False))", "print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query = \"select code, Name, Continent,population,HeadofState", "df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1", "City= input(\"Enter City Name :\") Code= input(\"Country Code :\") district= input(\"District :\") pop=", "displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: Code =", "in car: print (p) except : print (\"******Error Occurred while executing Mongo commands******\")", "myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if choice", "\">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 =", "print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2 -", "if Value.isdigit() == True: Code = Value break else: displaymenu() DBconnection (query, choice,", "is wrong with your user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database", "* from city where population\" + Comparison break else: displaymenu() while True: Value=", "try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs =", "df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"]", "import mysql.connector from mysql.connector import Error from mysql.connector import errorcode import pandas as", "choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <,", "cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12}", "mysql.connector import Error from mysql.connector import errorcode import pandas as pd #Mongo modules", "#Mongo modules import import pymongo from pymongo import MongoClient #Pandas printing module from", "#print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 == \">\": df1", 
"project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View", "countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new", "DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while True:", "+ str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add", "can only choose from the above options\") input(\"\\nPress enter to continue...\") def main():", "= \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True)", "connection.rollback() finally: #closing database connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a", "import pymongo from pymongo import MongoClient #Pandas printing module from tabulate import tabulate", "from city where population\" + Comparison break else: displaymenu() while True: Value= input(\"Enter", "\"select * from city limit 15\" DBconnection (query, choice,Code,param1) elif choice == \"2\":", "== \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as", "exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database: {}\".format(error)) connection.rollback() finally: #closing", "pd #Mongo modules import import pymongo from pymongo import MongoClient #Pandas printing module", "to continue...\") def main(): while True: menu() choice = input(\"Choice : --> \")", "print(\"Countries by Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from country\" while", "a valid choice. You can only choose from the above options\") input(\"\\nPress enter", "You can only choose from the above options\") input(\"\\nPress enter to continue...\") def", "csize.isdigit() == True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice ==", "choice, Code,param1) elif choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select", "docs = mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query)", "Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter <, > or = :\")", "csize = input(\"Enter Car Engine Size :\") if csize.isdigit() == True: csize =", "database connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a valid choice. 
You", "elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif", "< int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]", "print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode,", "== \"2\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} |", "= df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 ==", "== \"=\": param1=Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if", "Comparison == \">\" or Comparison == \"=\": query = \"select * from city", "Value break else: displaymenu() DBconnection (query, choice, Code,param1) elif choice == \"4\": print(\"show", "Comparison == \">\" or Comparison == \"=\": param1=Comparison break else: displaymenu() while True:", "if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name or password\")", "mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your", "== errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name or password\") elif error.errno", "Code,param1) elif choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select code,", "Database # Final Project # Section 4.4 - Python program answers # Author", "+ Code + \"','\" + district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1)", "choice. 
You can only choose from the above options\") input(\"\\nPress enter to continue...\")", "application\") myclient = None global dfp, df dfp =\"\" df = pd.DataFrame() def", "elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter", "countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12} | {:^20} |", "while executing Mongo commands******\") def globalSet (): global dfp dfp = \"2\" def", "does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code", "True: Value= input(\"Enter Population :\") if Value.isdigit() == True: Code = Value break", "Ctyname = input(\"Enter Country Name :\") query = \"select code, Name, Continent,population,HeadofState from", "input(\"Choice : --> \") Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye -", "\"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice", "== \"=\": query = \"select * from city where population\" + Comparison break", "import tabulate # This function will display a Menu as requested in the", "import import mysql.connector from mysql.connector import Error from mysql.connector import errorcode import pandas", "= query + str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice ==", "finally: #closing database connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a valid", "car = docs.find(query) for p in car: print (p) except : print (\"******Error", "displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add New City\") print(\"------------\") City=", "\"2\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20}", "record is inserted into the table\") elif choice == \"6\" : df1 =", "+ district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice == \"6\":", "globalSet (): global dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world',", "# Author : Somu #mySQL modules import import mysql.connector from mysql.connector import Error", "car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id),", "while True: menu() choice = input(\"Choice : --> \") Code,param1 = \"\",\"\" if", "\") Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye - Program Terminate now", "input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice. You can", "{:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" : cursor.execute(query) names =", "from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice == \"7\": print(\"Countries by", "print(\"That is not a valid choice. 
You can only choose from the menu.\")", "displaymenu() DBconnection (query, choice, Code,param1) elif choice == \"4\": print(\"show cars by engine", "\"+ code + \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to", "headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong", "(id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12} | {:^20}", "enter to continue...\") def main(): while True: menu() choice = input(\"Choice : -->", "country\" while True: Comparison = input(\"Enter <, > or = :\") if Comparison", "Exit application\") myclient = None global dfp, df dfp =\"\" df = pd.DataFrame()", ":\") if csize.isdigit() == True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif", "print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else:", "display a Menu as requested in the project specification def menu(): print(\"--------\") print(\"World", "print (\"******Error Occurred while executing Mongo commands******\") def globalSet (): global dfp dfp", "else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\") print(\"-----------\") id= input(\"_ids:\")", ": df = pd.read_sql_query(query, connection) globalSet() if choice == \"1\" : cursor.execute(query) names", "city record is inserted into the table\") elif choice == \"6\" : df1", "to the database: {}\".format(error)) connection.rollback() finally: #closing database connection. if(connection.is_connected()): connection.close() def displaymenu():", "Car Engine Size :\") if csize.isdigit() == True: csize = csize break else:", "== errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country", "# Final Project # Section 4.4 - Python program answers # Author :", "Find Car by Engine Size\") print(\"5 - Add New Car\") print(\"6 - View", "input(\"District :\") pop= input(\"Population :\") query = \"Insert INTO city (name, countrycode,district,population) VALUES", "code, Name, Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter <, > or", "myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)} car =", "elif param1 == \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except", "choice, Code,param1) elif choice == \"4\": print(\"show cars by engine size\") print(\"------------------------\") while", "Name :\") query = \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query,", "\"6\" or choice == \"7\") and dfp != \"2\" : df = pd.read_sql_query(query,", "= csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\")", "input(\"Country Code :\") district= input(\"District :\") pop= input(\"Population :\") query = \"Insert INTO", "== \"7\") and dfp != \"2\" : df = pd.read_sql_query(query, connection) globalSet() if", "print(\"Failed to connect to the database: {}\".format(error)) connection.rollback() finally: #closing database connection. 
if(connection.is_connected()):", "choose from the above options\") input(\"\\nPress enter to continue...\") def main(): while True:", "\"select * from city where population\" + Comparison break else: displaymenu() while True:", "only choose from the above options\") input(\"\\nPress enter to continue...\") def main(): while", "15 Cities\") print(\"2 - View Cities by population\") print(\"3 - Add New City\")", "= input(\"Enter <, > or = :\") if Comparison == \"<\" or Comparison", "Value= input(\"Enter Population :\") if Value.isdigit() == True: query = query + str(Value)", "- View Countries by population\") print(\"x - Exit application\") myclient = None global", "- Program Terminate now and welcome back anytime!\") return elif choice == \"1\":", "#Pandas printing module from tabulate import tabulate # This function will display a", "Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice. You can only", "elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\")", "\"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city record is inserted into", "break else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit() == True:", "\"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name :\") Code= input(\"Country Code", "choice == \"3\": print(\"Add New City\") print(\"------------\") City= input(\"Enter City Name :\") Code=", "input(\"\\nPress enter to continue...\") def main(): while True: menu() choice = input(\"Choice :", "user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif", "print(\"Database does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+", "not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code +", "== \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car", "print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice", "Add New City\") print(\"4 - Find Car by Engine Size\") print(\"5 - Add", "choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city record is", "errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code", "Code,param1) elif choice == \"4\": print(\"show cars by engine size\") print(\"------------------------\") while True:", "Car by Engine Size\") print(\"5 - Add New Car\") print(\"6 - View Countries", "mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or", ": --> \") Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye - Program", "pymongo import MongoClient #Pandas printing module from tabulate import tabulate # This function", "print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code + \" does not exist\") print(\"----------------------------------------------------\") else:", "names = list(map(lambda x: x[0], cursor.description)) 
print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} |", "now and welcome back anytime!\") return elif choice == \"1\": query= \"select *", ":\") pop= input(\"Population :\") query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\"", "menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2", "Project # Section 4.4 - Python program answers # Author : Somu #mySQL", "df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host", "p in car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice ==", "query = \"select * from city where population\" + Comparison break else: displaymenu()", "list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20} |", "connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or choice == \"7\") and dfp", "if Value.isdigit() == True: query = query + str(Value) break else: displaymenu() DBconnection", "(): global dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root',", "mysql.connector import errorcode import pandas as pd #Mongo modules import import pymongo from", "choice == \"2\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5}", "welcome back anytime!\") return elif choice == \"1\": query= \"select * from city", "INTO city (name, countrycode,district,population) VALUES ('\" + City + \"','\" + Code +", "== \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1", "== True: csize = csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\":", "| {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population,", "input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a", "user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or choice", "from the above options\") input(\"\\nPress enter to continue...\") def main(): while True: menu()", "== \"1\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} |", "Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <, > or = :\") if", "\"1\": query= \"select * from city limit 15\" DBconnection (query, choice,Code,param1) elif choice", "== \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice ==", "answers # Author : Somu #mySQL modules import import mysql.connector from mysql.connector import", "(query, choice, Code,param1) elif choice == \"7\": print(\"Countries by 
Population\") print(\"-----------------------\") query =", "dfp, df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient", "\"','\" + district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice ==", "input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is", "import import pymongo from pymongo import MongoClient #Pandas printing module from tabulate import", "district= input(\"District :\") pop= input(\"Population :\") query = \"Insert INTO city (name, countrycode,district,population)", "| {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" :", "choice == \"1\": query= \"select * from city limit 15\" DBconnection (query, choice,Code,param1)", "#mySQL modules import import mysql.connector from mysql.connector import Error from mysql.connector import errorcode", "True: csize = input(\"Enter Car Engine Size :\") if csize.isdigit() == True: csize", "--> \") Code,param1 = \"\",\"\" if choice == \"x\": print(\"Bye - Program Terminate", "tabulate # This function will display a Menu as requested in the project", "elif choice == \"7\": if param1 == \">\": df1 = df[(df[\"population\"] > int(code))", "connect to the database: {}\".format(error)) connection.rollback() finally: #closing database connection. if(connection.is_connected()): connection.close() def", "if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is not a valid choice. You can only", "Code= input(\"Country Code :\") district= input(\"District :\") pop= input(\"Population :\") query = \"Insert", "elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city record", "id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That", "print(\"6 - View Countries by name\") print(\"7 - View Countries by population\") print(\"x", "| {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20}", "# Applied Database # Final Project # Section 4.4 - Python program answers", "myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if", "modules import import pymongo from pymongo import MongoClient #Pandas printing module from tabulate", "dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host =", "(query, choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison", "\"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error", "elif choice == \"4\": print(\"show cars by engine size\") print(\"------------------------\") while True: csize", "df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 == \">\":", "City Name :\") Code= input(\"Country Code :\") 
district= input(\"District :\") pop= input(\"Population :\")", "Value.isdigit() == True: query = query + str(Value) break else: displaymenu() DBconnection (query,", "param1 == \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error", "= docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for p in car: print", "{\"_id\":int(id)} car = docs.find(query) for p in car: print (p) except : print", "reg= input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not", "- Exit application\") myclient = None global dfp, df dfp =\"\" df =", "or choice == \"7\") and dfp != \"2\" : df = pd.read_sql_query(query, connection)", "== \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1", "code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice ==", "| {:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" : cursor.execute(query) names = list(map(lambda", ":\") if Value.isdigit() == True: Code = Value break else: displaymenu() DBconnection (query,", "while True: csize = input(\"Enter Car Engine Size :\") if csize.isdigit() == True:", "printing module from tabulate import tabulate # This function will display a Menu", "else: print(\"That is not a valid choice. You can only choose from the", "elif choice == \"1\": query= \"select * from city limit 15\" DBconnection (query,", "= input(\"Enter Car Engine Size :\") if csize.isdigit() == True: csize = csize", "headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if param1 == \">\": df1 = df[(df[\"population\"] >", "code + \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the", ": df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice == \"7\": if", "].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something", "Error from mysql.connector import errorcode import pandas as pd #Mongo modules import import", "as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user", "(query, choice, Code,param1) elif choice == \"4\": print(\"show cars by engine size\") print(\"------------------------\")", "| {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}}", "def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\")", "choice. You can only choose from the menu.\") input(\"\\nPress enter to continue...\") if", "print(\"5 - Add New Car\") print(\"6 - View Countries by name\") print(\"7 -", "x = docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for p in car:", "('\" + City + \"','\" + Code + \"','\" + district + \"',\"+", "def displaymenu(): print(\"This is not a valid choice. 
You can only choose from", "from mysql.connector import Error from mysql.connector import errorcode import pandas as pd #Mongo", "name\") print(\"7 - View Countries by population\") print(\"x - Exit application\") myclient =", "You can only choose from the menu.\") input(\"\\nPress enter to continue...\") if __name__", "import Error from mysql.connector import errorcode import pandas as pd #Mongo modules import", "print(\"x - Exit application\") myclient = None global dfp, df dfp =\"\" df", "= Value break else: displaymenu() DBconnection (query, choice, Code,param1) elif choice == \"4\":", "Country Name :\") query = \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection", "cursor = connection.cursor(prepared=True) global dfp,df if (choice == \"6\" or choice == \"7\")", "Name, Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter <, > or =", "by name\") print(\"7 - View Countries by population\") print(\"x - Exit application\") myclient", "\"<\" or Comparison == \">\" or Comparison == \"=\": query = \"select *", "= docs.find(query) for p in car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"]))", "== \"7\": if param1 == \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif", "Program Terminate now and welcome back anytime!\") return elif choice == \"1\": query=", "print(\"------------------------\") while True: csize = input(\"Enter Car Engine Size :\") if csize.isdigit() ==", "= mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)} car = docs.find(query) for", ":\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice.", "\"select code, Name, Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter <, >", "elif choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\") query = \"select code, Name,", "database: {}\".format(error)) connection.rollback() finally: #closing database connection. if(connection.is_connected()): connection.close() def displaymenu(): print(\"This is", "query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City + \"','\"", "errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with your user name or password\") elif error.errno ==", "population\") print(\"3 - Add New City\") print(\"4 - Find Car by Engine Size\")", "Countries by name\") print(\"7 - View Countries by population\") print(\"x - Exit application\")", "4.4 - Python program answers # Author : Somu #mySQL modules import import", "Countries by population\") print(\"x - Exit application\") myclient = None global dfp, df", "program answers # Author : Somu #mySQL modules import import mysql.connector from mysql.connector", "MongoClient #Pandas printing module from tabulate import tabulate # This function will display", "the above options\") input(\"\\nPress enter to continue...\") def main(): while True: menu() choice", "size\") print(\"------------------------\") while True: csize = input(\"Enter Car Engine Size :\") if csize.isdigit()", "not a valid choice. 
You can only choose from the menu.\") input(\"\\nPress enter", "and welcome back anytime!\") return elif choice == \"1\": query= \"select * from", "{:d}\".format(id,name, countrycode, district,population)) elif choice == \"2\" : cursor.execute(query) names = list(map(lambda x:", "print(\"***ERROR***: Country Code \"+ code + \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed", "except mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR: print(\"Something is wrong with", "anytime!\") return elif choice == \"1\": query= \"select * from city limit 15\"", "Code \"+ code + \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect", "or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno ==", "dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor", "= \"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City + \"','\" +", "# Section 4.4 - Python program answers # Author : Somu #mySQL modules", "connection.commit print(\"**** RESULT ***** The new city record is inserted into the table\")", "RESULT ***** The new city record is inserted into the table\") elif choice", "mydb = myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\": query = {\"car.engineSize\":float(csize)}", "print(\"show cars by engine size\") print(\"------------------------\") while True: csize = input(\"Enter Car Engine", "\" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database: {}\".format(error))", "above options\") input(\"\\nPress enter to continue...\") def main(): while True: menu() choice =", "only choose from the menu.\") input(\"\\nPress enter to continue...\") if __name__ == \"__main__\":", "district,population)) elif choice == \"2\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description))", "int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1,", "csize break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\") print(\"-----------\")", "Comparison == \"=\": query = \"select * from city where population\" + Comparison", "break else: displaymenu() Mongoconnect(csize,choice,\"\",\"\",\"\") elif choice == \"5\": print(\"Add New Car\") print(\"-----------\") id=", "Car\") print(\"-----------\") id= input(\"_ids:\") reg= input(\"Enter reg :\") size= input(\"Enter Size :\") Mongoconnect(\"\",choice,id,reg,size)", "if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query =", "table\") elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\"))", "| {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice ==", "\">\" or Comparison == \"=\": query = \"select * from city where population\"", "(query, choice,Code,param1) elif choice == \"3\": print(\"Add New City\") 
print(\"------------\") City= input(\"Enter City", "specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15", "print(\"1 - View 15 Cities\") print(\"2 - View Cities by population\") print(\"3 -", "docs.find(query) for p in car: print (p) except : print (\"******Error Occurred while", "query= \"select * from city limit 15\" DBconnection (query, choice,Code,param1) elif choice ==", "as requested in the project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\")", "district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city", "population\") print(\"x - Exit application\") myclient = None global dfp, df dfp =\"\"", "query = {\"car.engineSize\":float(csize)} car = docs.find(query) for p in car: print ('{0} |", "executing Mongo commands******\") def globalSet (): global dfp dfp = \"2\" def DBconnection(query,choice,code,param1):", "except : print (\"******Error Occurred while executing Mongo commands******\") def globalSet (): global", "valid choice. You can only choose from the menu.\") input(\"\\nPress enter to continue...\")", "import MongoClient #Pandas printing module from tabulate import tabulate # This function will", "docs.find(query) for p in car: print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if", "View Countries by population\") print(\"x - Exit application\") myclient = None global dfp,", "True: Value= input(\"Enter Population :\") if Value.isdigit() == True: query = query +", "\"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global", "].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\"))", "limit 15\" DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\")", "by population\") print(\"x - Exit application\") myclient = None global dfp, df dfp", "Code + \"','\" + district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif", "wrong with your user name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does", ":\") query = \"select code, Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice,", "connection) globalSet() if choice == \"1\" : cursor.execute(query) names = list(map(lambda x: x[0],", "cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for", "\"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice == \"6\": print(\"Countries by Name\")", "Code = Value break else: displaymenu() DBconnection (query, choice, Code,param1) elif choice ==", "== \"<\" or Comparison == \">\" or Comparison == \"=\": param1=Comparison break else:", "will display a Menu as requested in the project specification def menu(): print(\"--------\")", "commands******\") def globalSet (): global dfp dfp = 
\"2\" def DBconnection(query,choice,code,param1): try: connection", "function will display a Menu as requested in the project specification def menu():", "myclient.admin.command('ismaster') mydb = myclient['docs'] docs = mydb[\"docs\"] if choice == \"4\": query =", "the project specification def menu(): print(\"--------\") print(\"World DB\") print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 -", "== int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno ==", "Add New Car\") print(\"6 - View Countries by name\") print(\"7 - View Countries", "{:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The", "| {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query)", "while True: Comparison = input(\"Enter <, > or = :\") if Comparison ==", "= pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global myclient myclient =pymongo.MongoClient(host = \"localhost\",port=27017) myclient.admin.command('ismaster') mydb", "int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif", "print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query = \"select", "not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database: {}\".format(error)) connection.rollback() finally:", "df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"]", "not a valid choice. 
You can only choose from the above options\") input(\"\\nPress", "== 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code + \" does not exist\")", "or Comparison == \"=\": param1=Comparison break else: displaymenu() while True: Value= input(\"Enter Population", "Name, Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice == \"7\":", "Continent,population,HeadofState from country\" Code=Ctyname DBconnection (query, choice, Code,param1) elif choice == \"7\": print(\"Countries", "New City\") print(\"4 - Find Car by Engine Size\") print(\"5 - Add New", "Size\") print(\"5 - Add New Car\") print(\"6 - View Countries by name\") print(\"7", "name or password\") elif error.errno == errorcode.ER_BAD_DB_ERROR: print(\"Database does not exist\") elif error.errno", "if Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\": query", "Value= input(\"Enter Population :\") if Value.isdigit() == True: Code = Value break else:", "Country Code \"+ code + \" does not exist\") print(\"----------------------------------------------------\") else: print(\"Failed to", "countrycode,district,population) VALUES ('\" + City + \"','\" + Code + \"','\" + district", "choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print tabulate(df1.to_string(index=False)) print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) elif choice", "= {\"car.engineSize\":float(csize)} car = docs.find(query) for p in car: print ('{0} | {1}", "query + str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\":", "= None global dfp, df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try:", ":\") query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City +", "== \"2\": print(\"Cities by Population\") print(\"--------------------\") while True: Comparison = input(\"Enter <, >", "Cities\") print(\"2 - View Cities by population\") print(\"3 - Add New City\") print(\"4", "New City\") print(\"------------\") City= input(\"Enter City Name :\") Code= input(\"Country Code :\") district=", "print(\"====\") print(\"1 - View 15 Cities\") print(\"2 - View Cities by population\") print(\"3", "x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\")", "Car\") print(\"6 - View Countries by name\") print(\"7 - View Countries by population\")", "Code=Ctyname DBconnection (query, choice, Code,param1) elif choice == \"7\": print(\"Countries by Population\") print(\"-----------------------\")", "None global dfp, df dfp =\"\" df = pd.DataFrame() def Mongoconnect(csize,choice,id,reg,size): try: global", "\"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query) for p in", "inserted into the table\") elif choice == \"6\" : df1 = df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] #print", "input(\"Enter Population :\") if Value.isdigit() == True: query = query + str(Value) break", "exist\") elif error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code 
\"+ code + \"", "else: displaymenu() DBconnection (query, choice,Code,param1) elif choice == \"3\": print(\"Add New City\") print(\"------------\")", "query = \"select code, Name, Continent,population,HeadofState from country\" while True: Comparison = input(\"Enter", "== \">\" or Comparison == \"=\": param1=Comparison break else: displaymenu() while True: Value=", "= df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if", "== \"6\" or choice == \"7\") and dfp != \"2\" : df =", "> int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"<\": df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]", "('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": {", "connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor = connection.cursor(prepared=True) global dfp,df if (choice ==", "if (choice == \"6\" or choice == \"7\") and dfp != \"2\" :", "globalSet() if choice == \"1\" : cursor.execute(query) names = list(map(lambda x: x[0], cursor.description))", "df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno", "or Comparison == \">\" or Comparison == \"=\": query = \"select * from", "choice == \"7\": if param1 == \">\": df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]", "district,population, latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name,", "== \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname = input(\"Enter Country Name :\") query", "for p in car: print (p) except : print (\"******Error Occurred while executing", "+ \"','\" + Code + \"','\" + district + \"',\"+ str(pop)+\")\" DBconnection (query,", "str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\")", "(query, choice, Code,param1) elif choice == \"6\": print(\"Countries by Name\") print(\"-----------------\") Ctyname =", "a Menu as requested in the project specification def menu(): print(\"--------\") print(\"World DB\")", ":\") Code= input(\"Country Code :\") district= input(\"District :\") pop= input(\"Population :\") query =", "Terminate now and welcome back anytime!\") return elif choice == \"1\": query= \"select", "query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query) query = {\"_id\":int(id)} car = docs.find(query)", "Comparison = input(\"Enter <, > or = :\") if Comparison == \"<\" or", "query = query + str(Value) break else: displaymenu() DBconnection (query, choice,Code,param1) elif choice", "Code :\") district= input(\"District :\") pop= input(\"Population :\") query = \"Insert INTO city", "Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\": param1=Comparison break", "15\" DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\") print(\"--------------------\") while", "the database: {}\".format(error)) connection.rollback() finally: #closing database connection. 
if(connection.is_connected()): connection.close() def displaymenu(): print(\"This", "choose from the menu.\") input(\"\\nPress enter to continue...\") if __name__ == \"__main__\": main()", "Section 4.4 - Python program answers # Author : Somu #mySQL modules import", "| {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x =", "print(\"--------\") print(\"Menu\") print(\"====\") print(\"1 - View 15 Cities\") print(\"2 - View Cities by", "{2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"])) if choice == \"5\": query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}} x = docs.insert_one(query)", "print(\"--------------------\") while True: Comparison = input(\"Enter <, > or = :\") if Comparison", "== \"3\": cursor.execute(query) connection.commit print(\"**** RESULT ***** The new city record is inserted", "int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error : if error.errno == errorcode.ER_ACCESS_DENIED_ERROR:", "error.errno == 1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code + \" does not", "{:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4])) print(\"----------------------------------------------------------------------------------\") for (id,name, countrycode, district,population, latitue,longitude)", "dfp != \"2\" : df = pd.read_sql_query(query, connection) globalSet() if choice == \"1\"", "valid choice. You can only choose from the above options\") input(\"\\nPress enter to", "DBconnection (query, choice, Code,param1) elif choice == \"4\": print(\"show cars by engine size\")", "df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\")) except mysql.connector.Error as error :", "print(\"**** RESULT ***** The new city record is inserted into the table\") elif", ":\") Mongoconnect(\"\",choice,id,reg,size) else: print(\"That is not a valid choice. You can only choose", "query = {\"_id\":int(id)} car = docs.find(query) for p in car: print (p) except", "tabulate import tabulate # This function will display a Menu as requested in", "x: x[0], cursor.description)) print(\"----------------------------------------------------------------------------------\") print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4]))", "= df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]] elif param1 == \"=\": df1 = df[(df[\"population\"] ==", "input(\"Enter Population :\") if Value.isdigit() == True: Code = Value break else: displaymenu()", "displaymenu(): print(\"This is not a valid choice. 
You can only choose from the", "latitue,longitude) in cursor: print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode,", "dfp = \"2\" def DBconnection(query,choice,code,param1): try: connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>') cursor =", "| {:^20} | {:d}\".format(id,name, countrycode, district,population)) elif choice == \"3\": cursor.execute(query) connection.commit print(\"****", "1452: print(\"----------------------------------------------------\") print(\"***ERROR***: Country Code \"+ code + \" does not exist\") print(\"----------------------------------------------------\")", "city limit 15\" DBconnection (query, choice,Code,param1) elif choice == \"2\": print(\"Cities by Population\")", "\"=\": param1=Comparison break else: displaymenu() while True: Value= input(\"Enter Population :\") if Value.isdigit()", "is not a valid choice. You can only choose from the above options\")", "print(\"----------------------------------------------------\") else: print(\"Failed to connect to the database: {}\".format(error)) connection.rollback() finally: #closing database", "Mongo commands******\") def globalSet (): global dfp dfp = \"2\" def DBconnection(query,choice,code,param1): try:", "errorcode import pandas as pd #Mongo modules import import pymongo from pymongo import", "district + \"',\"+ str(pop)+\")\" DBconnection (query, choice, Code,param1) elif choice == \"6\": print(\"Countries", "by Population\") print(\"-----------------------\") query = \"select code, Name, Continent,population,HeadofState from country\" while True:", "while True: Value= input(\"Enter Population :\") if Value.isdigit() == True: Code = Value" ]
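# Side note: option 3 above builds its INSERT by string concatenation, which
# breaks on names containing quotes and is open to SQL injection. Below is a
# minimal sketch of the same insert using mysql.connector's parameter binding;
# the connection settings simply mirror the ones used above, and insert_city
# is an illustrative helper, not part of the original program.

import mysql.connector

def insert_city(name, countrycode, district, population):
    """Insert one row into world.city using driver-side parameter binding."""
    connection = mysql.connector.connect(host='localhost', database='world',
                                         user='root', password='<PASSWORD>')
    try:
        cursor = connection.cursor()
        # %s placeholders let the driver quote and escape every value itself.
        sql = ("INSERT INTO city (name, countrycode, district, population) "
               "VALUES (%s, %s, %s, %s)")
        cursor.execute(sql, (name, countrycode, district, int(population)))
        connection.commit()
    finally:
        connection.close()

# Example: insert_city("New Town", "IRL", "Galway", 12345)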
[ "done. elif A is 'no': # if you said no, this will happen:", "nothing was valid... print 'Error! Invalid transaction! ' # ...error message! print 'Done!'", "it is done raw_input(\"Press <RETURN> to quit.\") # makes you type <enter> to", "for a filename A = input('Do you want this to be appended to", "trick. Divides correct by amount, assigns to 'calc' calcx = (correct / amount)", "print \"\"\"############################################ # Welcome to Gradebook! v 0.1 # # YOUR LIGHT WEIGHT", "same, but make the number a DECIMAL! correct = input('How many questions did", "append,or to create new file. assigns answer to 'A' print 'Thanks! appending to", "= input('Do you want this to be appended to an existing file? ')", "happen: fyl = open(fle, 'w') # same as before, but saves the file", "'Thanks! appending to file... ' if A is 'yes': #if you answered yes:", "said. fyl.close() # closes the file; job is done. elif A is 'no':", "print 'Done!' # says it is done raw_input(\"Press <RETURN> to quit.\") # makes", "# decides to either append,or to create new file. assigns answer to 'A'", "grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All letter answers are to be", "# creates what will be in your file. assigns to 'text' print text", "^^This is pretty much the same: but asks the date. amount = input('What", "0.1-PUBLIC # NOTE! All letter answers are to be written in quotes (including", "job is done. elif A is 'no': # if you said no, this", "saves the file (see the 'w' instead of 'a'?) fyl.write(text) # same fyl.close()", "% (date, subject, correct, amount, calc, calcx) # creates what will be in", "YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your", "correct, amount, calc, calcx) # creates what will be in your file. assigns", "DECIMALS! calc = divmod(correct, amount) # This is a nice homework trick. Divides", "= raw_input('What should I name the file to put the above data into?", "A is 'no': # if you said no, this will happen: fyl =", "same... make all DECIMALS! calc = divmod(correct, amount) # This is a nice", "said no, this will happen: fyl = open(fle, 'w') # same as before,", "'Done!' # says it is done raw_input(\"Press <RETURN> to quit.\") # makes you", "before, but saves the file (see the 'w' instead of 'a'?) fyl.write(text) #", "what it will put in your file (or append). fle = raw_input('What should", "Invalid transaction! ' # ...error message! print 'Done!' # says it is done", "is a nice homework trick. Divides correct by amount, assigns to 'calc' calcx", "is your assignment's subject? \") # ^^This asks your class subject; assigns it", "') # ^^^This is also the same, but make the number a DECIMAL!", "%s or %s \\n\" % (date, subject, correct, amount, calc, calcx) # creates", "# ...error message! print 'Done!' # says it is done raw_input(\"Press <RETURN> to", "but asks the date. amount = input('What is the number of questions? (NOTE:", "are to be written in quotes (including dates)! print \"\"\"############################################ # Welcome to", "subject; assigns it to 'subject'; and is used later. date = input('What is", "input('Do you want this to be appended to an existing file? ') #", "creates what will be in your file. assigns to 'text' print text #", "the command assigned to 'fyl' writes your data to the filename you said.", "the same: but asks the date. amount = input('What is the number of", "'calcx' text = \"***%s*** \\n %s | %d out of %d | %s", "input('What is the number of questions? 
(NOTE: make all #s from now decimals.", "is done. elif A is 'no': # if you said no, this will", "# YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is", "assigned to 'fyl' writes your data to the filename you said. fyl.close() #", "= (correct / amount) # divides correct by amount; assigns to 'calcx' text", "assigns to 'calc' calcx = (correct / amount) # divides correct by amount;", "a nice homework trick. Divides correct by amount, assigns to 'calc' calcx =", "same else: # and if nothing was valid... print 'Error! Invalid transaction! '", "assigns it to 'subject'; and is used later. date = input('What is the", "date = input('What is the date for your assignment? ') # ^^This is", "# keep record of grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All letter", "| %d out of %d | %s or %s \\n\" % (date, subject,", "used to combine open('fle, 'a') with future commands fyl.write(text) # the command assigned", "the above data into? ') # prompts for a filename A = input('Do", "%d out of %d | %s or %s \\n\" % (date, subject, correct,", "(including dates)! print \"\"\"############################################ # Welcome to Gradebook! v 0.1 # # YOUR", "raw_input(\"What is your assignment's subject? \") # ^^This asks your class subject; assigns", "# the command assigned to 'fyl' writes your data to the filename you", "MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your assignment's subject? \") # ^^This", "# This is a nice homework trick. Divides correct by amount, assigns to", "to be written in quotes (including dates)! print \"\"\"############################################ # Welcome to Gradebook!", "with future commands fyl.write(text) # the command assigned to 'fyl' writes your data", "= input('What is the number of questions? (NOTE: make all #s from now", "to 'A' print 'Thanks! appending to file... ' if A is 'yes': #if", "amount = input('What is the number of questions? (NOTE: make all #s from", "correct = input('How many questions did you get correct? ') # ^^^The same...", "'fyl' is used to combine open('fle, 'a') with future commands fyl.write(text) # the", "also the same, but make the number a DECIMAL! correct = input('How many", "says it is done raw_input(\"Press <RETURN> to quit.\") # makes you type <enter>", "will put in your file (or append). fle = raw_input('What should I name", "= input('What is the date for your assignment? ') # ^^This is pretty", "new file. assigns answer to 'A' print 'Thanks! appending to file... ' if", "combine open('fle, 'a') with future commands fyl.write(text) # the command assigned to 'fyl'", "v 0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject", "appended to an existing file? ') # decides to either append,or to create", "'a') with future commands fyl.write(text) # the command assigned to 'fyl' writes your", "yes: fyl = open(fle, 'a') # the phrase 'fyl' is used to combine", "was valid... print 'Error! Invalid transaction! ' # ...error message! print 'Done!' #", "existing file? ') # decides to either append,or to create new file. assigns", "the file; job is done. elif A is 'no': # if you said", "calc = divmod(correct, amount) # This is a nice homework trick. Divides correct", "your assignment's subject? \") # ^^This asks your class subject; assigns it to", "fle = raw_input('What should I name the file to put the above data", "the number of questions? (NOTE: make all #s from now decimals. 
e.g.: \"5.0\"", "and if nothing was valid... print 'Error! Invalid transaction! ' # ...error message!", "class subject; assigns it to 'subject'; and is used later. date = input('What", "fyl.write(text) # same fyl.close() # same else: # and if nothing was valid...", "= input('How many questions did you get correct? ') # ^^^The same... make", "questions did you get correct? ') # ^^^The same... make all DECIMALS! calc", "did you get correct? ') # ^^^The same... make all DECIMALS! calc =", "else: # and if nothing was valid... print 'Error! Invalid transaction! ' #", "...error message! print 'Done!' # says it is done raw_input(\"Press <RETURN> to quit.\")", "create new file. assigns answer to 'A' print 'Thanks! appending to file... '", "# ^^^This is also the same, but make the number a DECIMAL! correct", "open(fle, 'w') # same as before, but saves the file (see the 'w'", "fyl = open(fle, 'w') # same as before, but saves the file (see", "by amount; assigns to 'calcx' text = \"***%s*** \\n %s | %d out", "to be appended to an existing file? ') # decides to either append,or", "is done raw_input(\"Press <RETURN> to quit.\") # makes you type <enter> to quit.", "Divides correct by amount, assigns to 'calc' calcx = (correct / amount) #", "'w') # same as before, but saves the file (see the 'w' instead", "your data to the filename you said. fyl.close() # closes the file; job", "make all #s from now decimals. e.g.: \"5.0\" ') # ^^^This is also", "the 'w' instead of 'a'?) fyl.write(text) # same fyl.close() # same else: #", "'Error! Invalid transaction! ' # ...error message! print 'Done!' # says it is", "a filename A = input('Do you want this to be appended to an", "file. assigns to 'text' print text # prints what it will put in", "'text' print text # prints what it will put in your file (or", "correct by amount, assigns to 'calc' calcx = (correct / amount) # divides", "assigns to 'calcx' text = \"***%s*** \\n %s | %d out of %d", "future commands fyl.write(text) # the command assigned to 'fyl' writes your data to", "data to the filename you said. fyl.close() # closes the file; job is", "no, this will happen: fyl = open(fle, 'w') # same as before, but", "or %s \\n\" % (date, subject, correct, amount, calc, calcx) # creates what", "of grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All letter answers are to", "# # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What", "file to put the above data into? ') # prompts for a filename", "file (or append). fle = raw_input('What should I name the file to put", "answer to 'A' print 'Thanks! appending to file... ' if A is 'yes':", "# and if nothing was valid... print 'Error! Invalid transaction! ' # ...error", "many questions did you get correct? ') # ^^^The same... make all DECIMALS!", "# closes the file; job is done. elif A is 'no': # if", "keep record of grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All letter answers", "but make the number a DECIMAL! correct = input('How many questions did you", "0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject =", "# NOTE! All letter answers are to be written in quotes (including dates)!", "print text # prints what it will put in your file (or append).", "phrase 'fyl' is used to combine open('fle, 'a') with future commands fyl.write(text) #", "\") # ^^This asks your class subject; assigns it to 'subject'; and is", "(NOTE: make all #s from now decimals. 
e.g.: \"5.0\" ') # ^^^This is", "A = input('Do you want this to be appended to an existing file?", "want this to be appended to an existing file? ') # decides to", "is the number of questions? (NOTE: make all #s from now decimals. e.g.:", "#s from now decimals. e.g.: \"5.0\" ') # ^^^This is also the same,", "^^^This is also the same, but make the number a DECIMAL! correct =", "above data into? ') # prompts for a filename A = input('Do you", "the phrase 'fyl' is used to combine open('fle, 'a') with future commands fyl.write(text)", "to the filename you said. fyl.close() # closes the file; job is done.", "it to 'subject'; and is used later. date = input('What is the date", "to either append,or to create new file. assigns answer to 'A' print 'Thanks!", "same: but asks the date. amount = input('What is the number of questions?", "\"5.0\" ') # ^^^This is also the same, but make the number a", "amount, calc, calcx) # creates what will be in your file. assigns to", "subject? \") # ^^This asks your class subject; assigns it to 'subject'; and", "should I name the file to put the above data into? ') #", "'subject'; and is used later. date = input('What is the date for your", "\"\"\"############################################ # Welcome to Gradebook! v 0.1 # # YOUR LIGHT WEIGHT SCHOOL", "Gradebook! v 0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\"", "asks the date. amount = input('What is the number of questions? (NOTE: make", "calcx = (correct / amount) # divides correct by amount; assigns to 'calcx'", "'no': # if you said no, this will happen: fyl = open(fle, 'w')", "%s \\n\" % (date, subject, correct, amount, calc, calcx) # creates what will", "instead of 'a'?) fyl.write(text) # same fyl.close() # same else: # and if", "open('fle, 'a') with future commands fyl.write(text) # the command assigned to 'fyl' writes", "the date. amount = input('What is the number of questions? (NOTE: make all", "correct by amount; assigns to 'calcx' text = \"***%s*** \\n %s | %d", "\\n %s | %d out of %d | %s or %s \\n\" %", "(correct / amount) # divides correct by amount; assigns to 'calcx' text =", "of questions? (NOTE: make all #s from now decimals. e.g.: \"5.0\" ') #", "input('What is the date for your assignment? ') # ^^This is pretty much", "'a'?) fyl.write(text) # same fyl.close() # same else: # and if nothing was", "LIGHT WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your assignment's", "is 'yes': #if you answered yes: fyl = open(fle, 'a') # the phrase", "') # decides to either append,or to create new file. assigns answer to", "pretty much the same: but asks the date. amount = input('What is the", "if A is 'yes': #if you answered yes: fyl = open(fle, 'a') #", "decides to either append,or to create new file. assigns answer to 'A' print", "') # prompts for a filename A = input('Do you want this to", "dates)! print \"\"\"############################################ # Welcome to Gradebook! v 0.1 # # YOUR LIGHT", "is used to combine open('fle, 'a') with future commands fyl.write(text) # the command", "number of questions? (NOTE: make all #s from now decimals. e.g.: \"5.0\" ')", "it will put in your file (or append). fle = raw_input('What should I", "number a DECIMAL! correct = input('How many questions did you get correct? ')", "will be in your file. assigns to 'text' print text # prints what", "all DECIMALS! calc = divmod(correct, amount) # This is a nice homework trick.", "Made by <NAME>. 
0.1-PUBLIC # NOTE! All letter answers are to be written", "letter answers are to be written in quotes (including dates)! print \"\"\"############################################ #", "same as before, but saves the file (see the 'w' instead of 'a'?)", "divmod(correct, amount) # This is a nice homework trick. Divides correct by amount,", "you said. fyl.close() # closes the file; job is done. elif A is", "= open(fle, 'a') # the phrase 'fyl' is used to combine open('fle, 'a')", "answers are to be written in quotes (including dates)! print \"\"\"############################################ # Welcome", "(or append). fle = raw_input('What should I name the file to put the", "/ amount) # divides correct by amount; assigns to 'calcx' text = \"***%s***", "file? ') # decides to either append,or to create new file. assigns answer", "assigns answer to 'A' print 'Thanks! appending to file... ' if A is", "to an existing file? ') # decides to either append,or to create new", "SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your assignment's subject? \")", "Welcome to Gradebook! v 0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER!", "(date, subject, correct, amount, calc, calcx) # creates what will be in your", "' if A is 'yes': #if you answered yes: fyl = open(fle, 'a')", "but saves the file (see the 'w' instead of 'a'?) fyl.write(text) # same", "transaction! ' # ...error message! print 'Done!' # says it is done raw_input(\"Press", "get correct? ') # ^^^The same... make all DECIMALS! calc = divmod(correct, amount)", "'A' print 'Thanks! appending to file... ' if A is 'yes': #if you", "This is a nice homework trick. Divides correct by amount, assigns to 'calc'", "the date for your assignment? ') # ^^This is pretty much the same:", "I name the file to put the above data into? ') # prompts", "= open(fle, 'w') # same as before, but saves the file (see the", "WEIGHT SCHOOL RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your assignment's subject?", "is also the same, but make the number a DECIMAL! correct = input('How", "fyl.close() # closes the file; job is done. elif A is 'no': #", "e.g.: \"5.0\" ') # ^^^This is also the same, but make the number", "assigns to 'text' print text # prints what it will put in your", "amount; assigns to 'calcx' text = \"***%s*** \\n %s | %d out of", "'w' instead of 'a'?) fyl.write(text) # same fyl.close() # same else: # and", "^^This asks your class subject; assigns it to 'subject'; and is used later.", "make the number a DECIMAL! correct = input('How many questions did you get", "you answered yes: fyl = open(fle, 'a') # the phrase 'fyl' is used", "A is 'yes': #if you answered yes: fyl = open(fle, 'a') # the", "# prompts for a filename A = input('Do you want this to be", "is 'no': # if you said no, this will happen: fyl = open(fle,", "# Welcome to Gradebook! v 0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD", "the file to put the above data into? ') # prompts for a", "correct? ') # ^^^The same... make all DECIMALS! calc = divmod(correct, amount) #", "(see the 'w' instead of 'a'?) fyl.write(text) # same fyl.close() # same else:", "# ^^^The same... make all DECIMALS! calc = divmod(correct, amount) # This is", "from now decimals. e.g.: \"5.0\" ') # ^^^This is also the same, but", "open(fle, 'a') # the phrase 'fyl' is used to combine open('fle, 'a') with", "All letter answers are to be written in quotes (including dates)! 
print \"\"\"############################################", "') # ^^^The same... make all DECIMALS! calc = divmod(correct, amount) # This", "command assigned to 'fyl' writes your data to the filename you said. fyl.close()", "by amount, assigns to 'calc' calcx = (correct / amount) # divides correct", "calcx) # creates what will be in your file. assigns to 'text' print", "to put the above data into? ') # prompts for a filename A", "\\n\" % (date, subject, correct, amount, calc, calcx) # creates what will be", "questions? (NOTE: make all #s from now decimals. e.g.: \"5.0\" ') # ^^^This", "fyl.close() # same else: # and if nothing was valid... print 'Error! Invalid", "append). fle = raw_input('What should I name the file to put the above", "closes the file; job is done. elif A is 'no': # if you", "make all DECIMALS! calc = divmod(correct, amount) # This is a nice homework", "############################################\"\"\" subject = raw_input(\"What is your assignment's subject? \") # ^^This asks your", "the number a DECIMAL! correct = input('How many questions did you get correct?", "# same else: # and if nothing was valid... print 'Error! Invalid transaction!", "| %s or %s \\n\" % (date, subject, correct, amount, calc, calcx) #", "and is used later. date = input('What is the date for your assignment?", "amount, assigns to 'calc' calcx = (correct / amount) # divides correct by", "input('How many questions did you get correct? ') # ^^^The same... make all", "file (see the 'w' instead of 'a'?) fyl.write(text) # same fyl.close() # same", "# same fyl.close() # same else: # and if nothing was valid... print", "be appended to an existing file? ') # decides to either append,or to", "# ############################################\"\"\" subject = raw_input(\"What is your assignment's subject? \") # ^^This asks", "fyl.write(text) # the command assigned to 'fyl' writes your data to the filename", "assignment's subject? \") # ^^This asks your class subject; assigns it to 'subject';", "subject = raw_input(\"What is your assignment's subject? \") # ^^This asks your class", "decimals. e.g.: \"5.0\" ') # ^^^This is also the same, but make the", "later. date = input('What is the date for your assignment? ') # ^^This", "your file. assigns to 'text' print text # prints what it will put", "text = \"***%s*** \\n %s | %d out of %d | %s or", "amount) # divides correct by amount; assigns to 'calcx' text = \"***%s*** \\n", "name the file to put the above data into? ') # prompts for", "same fyl.close() # same else: # and if nothing was valid... print 'Error!", "NOTE! All letter answers are to be written in quotes (including dates)! print", "this will happen: fyl = open(fle, 'w') # same as before, but saves", "writes your data to the filename you said. fyl.close() # closes the file;", "raw_input('What should I name the file to put the above data into? ')", "= \"***%s*** \\n %s | %d out of %d | %s or %s", "calc, calcx) # creates what will be in your file. assigns to 'text'", "to file... ' if A is 'yes': #if you answered yes: fyl =", "in your file (or append). fle = raw_input('What should I name the file", "quotes (including dates)! print \"\"\"############################################ # Welcome to Gradebook! v 0.1 # #", "of %d | %s or %s \\n\" % (date, subject, correct, amount, calc,", "elif A is 'no': # if you said no, this will happen: fyl", "be written in quotes (including dates)! print \"\"\"############################################ # Welcome to Gradebook! 
v", "prompts for a filename A = input('Do you want this to be appended", "data into? ') # prompts for a filename A = input('Do you want", "to 'calc' calcx = (correct / amount) # divides correct by amount; assigns", "message! print 'Done!' # says it is done raw_input(\"Press <RETURN> to quit.\") #", "now decimals. e.g.: \"5.0\" ') # ^^^This is also the same, but make", "filename A = input('Do you want this to be appended to an existing", "appending to file... ' if A is 'yes': #if you answered yes: fyl", "amount) # This is a nice homework trick. Divides correct by amount, assigns", "to combine open('fle, 'a') with future commands fyl.write(text) # the command assigned to", "homework trick. Divides correct by amount, assigns to 'calc' calcx = (correct /", "RECORD MANAGER! # ############################################\"\"\" subject = raw_input(\"What is your assignment's subject? \") #", "this to be appended to an existing file? ') # decides to either", "is the date for your assignment? ') # ^^This is pretty much the", "written in quotes (including dates)! print \"\"\"############################################ # Welcome to Gradebook! v 0.1", "') # ^^This is pretty much the same: but asks the date. amount", "date. amount = input('What is the number of questions? (NOTE: make all #s", "'a') # the phrase 'fyl' is used to combine open('fle, 'a') with future", "be in your file. assigns to 'text' print text # prints what it", "<NAME>. 0.1-PUBLIC # NOTE! All letter answers are to be written in quotes", "used later. date = input('What is the date for your assignment? ') #", "you said no, this will happen: fyl = open(fle, 'w') # same as", "your assignment? ') # ^^This is pretty much the same: but asks the", "# says it is done raw_input(\"Press <RETURN> to quit.\") # makes you type", "%s | %d out of %d | %s or %s \\n\" % (date,", "in your file. assigns to 'text' print text # prints what it will", "# divides correct by amount; assigns to 'calcx' text = \"***%s*** \\n %s", "'calc' calcx = (correct / amount) # divides correct by amount; assigns to", "the file (see the 'w' instead of 'a'?) fyl.write(text) # same fyl.close() #", "valid... print 'Error! Invalid transaction! ' # ...error message! print 'Done!' # says", "' # ...error message! print 'Done!' # says it is done raw_input(\"Press <RETURN>", "the filename you said. fyl.close() # closes the file; job is done. elif", "either append,or to create new file. assigns answer to 'A' print 'Thanks! appending", "#! /usr/bin/python # keep record of grades. Made by <NAME>. 0.1-PUBLIC # NOTE!", "print 'Error! Invalid transaction! ' # ...error message! print 'Done!' # says it", "= raw_input(\"What is your assignment's subject? \") # ^^This asks your class subject;", "# the phrase 'fyl' is used to combine open('fle, 'a') with future commands", "'yes': #if you answered yes: fyl = open(fle, 'a') # the phrase 'fyl'", "assignment? ') # ^^This is pretty much the same: but asks the date.", "date for your assignment? ') # ^^This is pretty much the same: but", "# same as before, but saves the file (see the 'w' instead of", "^^^The same... make all DECIMALS! calc = divmod(correct, amount) # This is a", "put in your file (or append). fle = raw_input('What should I name the", "file... ' if A is 'yes': #if you answered yes: fyl = open(fle,", "what will be in your file. assigns to 'text' print text # prints", "to 'subject'; and is used later. 
date = input('What is the date for", "to 'calcx' text = \"***%s*** \\n %s | %d out of %d |", "asks your class subject; assigns it to 'subject'; and is used later. date", "divides correct by amount; assigns to 'calcx' text = \"***%s*** \\n %s |", "answered yes: fyl = open(fle, 'a') # the phrase 'fyl' is used to", "commands fyl.write(text) # the command assigned to 'fyl' writes your data to the", "'fyl' writes your data to the filename you said. fyl.close() # closes the", "if you said no, this will happen: fyl = open(fle, 'w') # same", "the same, but make the number a DECIMAL! correct = input('How many questions", "prints what it will put in your file (or append). fle = raw_input('What", "your file (or append). fle = raw_input('What should I name the file to", "record of grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All letter answers are", "as before, but saves the file (see the 'w' instead of 'a'?) fyl.write(text)", "into? ') # prompts for a filename A = input('Do you want this", "text # prints what it will put in your file (or append). fle", "is used later. date = input('What is the date for your assignment? ')", "file; job is done. elif A is 'no': # if you said no,", "you want this to be appended to an existing file? ') # decides", "for your assignment? ') # ^^This is pretty much the same: but asks", "an existing file? ') # decides to either append,or to create new file.", "to 'fyl' writes your data to the filename you said. fyl.close() # closes", "fyl = open(fle, 'a') # the phrase 'fyl' is used to combine open('fle,", "by <NAME>. 0.1-PUBLIC # NOTE! All letter answers are to be written in", "to Gradebook! v 0.1 # # YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! #", "#if you answered yes: fyl = open(fle, 'a') # the phrase 'fyl' is", "to create new file. assigns answer to 'A' print 'Thanks! appending to file...", "# prints what it will put in your file (or append). fle =", "to 'text' print text # prints what it will put in your file", "if nothing was valid... print 'Error! Invalid transaction! ' # ...error message! print", "# if you said no, this will happen: fyl = open(fle, 'w') #", "much the same: but asks the date. amount = input('What is the number", "filename you said. fyl.close() # closes the file; job is done. elif A", "%d | %s or %s \\n\" % (date, subject, correct, amount, calc, calcx)", "put the above data into? ') # prompts for a filename A =", "in quotes (including dates)! print \"\"\"############################################ # Welcome to Gradebook! v 0.1 #", "print 'Thanks! appending to file... ' if A is 'yes': #if you answered", "nice homework trick. Divides correct by amount, assigns to 'calc' calcx = (correct", "is pretty much the same: but asks the date. amount = input('What is", "file. assigns answer to 'A' print 'Thanks! appending to file... ' if A", "out of %d | %s or %s \\n\" % (date, subject, correct, amount,", "a DECIMAL! correct = input('How many questions did you get correct? ') #", "DECIMAL! correct = input('How many questions did you get correct? ') # ^^^The", "all #s from now decimals. e.g.: \"5.0\" ') # ^^^This is also the", "= divmod(correct, amount) # This is a nice homework trick. Divides correct by", "/usr/bin/python # keep record of grades. Made by <NAME>. 0.1-PUBLIC # NOTE! All", "your class subject; assigns it to 'subject'; and is used later. date =", "# ^^This asks your class subject; assigns it to 'subject'; and is used", "you get correct? ') # ^^^The same... make all DECIMALS! calc = divmod(correct,", "# ^^This is pretty much the same: but asks the date. 
amount =", "will happen: fyl = open(fle, 'w') # same as before, but saves the", "\"***%s*** \\n %s | %d out of %d | %s or %s \\n\"", "of 'a'?) fyl.write(text) # same fyl.close() # same else: # and if nothing", "subject, correct, amount, calc, calcx) # creates what will be in your file." ]
[ "\"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies = []", "is_one_to_one = False is_ortho_plus_paralog = False break if line.count(group) > 1: is_one_to_one =", "in f: is_one_to_one = True is_ortho_plus_paralog = True for group in groups: if", "line_count = line.count(group) if line_count == 0: continue if cur_group is not None:", "== 0: continue if cur_group is not None: valid_group = False break cur_group", "\"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\")", "line in f: is_one_to_one = True is_ortho_plus_paralog = True for group in groups:", "group_count = 0 for group in groups: line_count = line.count(group) if line_count ==", "= [] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog = {} for group", "# for line in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") # for", "> 1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out =", "is not None: valid_group = False break cur_group = group group_count = line_count", "group_paralog = {} for group in groups: group_single[group] = [] group_paralog[group] = []", "one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for", "\"w\") for line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line", "[] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog = {}", "1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\",", "= open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line)", "= [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog =", "ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog", "is_ortho_plus_paralog = False break if line.count(group) > 1: is_one_to_one = False if is_one_to_one:", "and cur_group is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else:", "[] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog = {} for group in", "other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\")", "if cur_group is not None: valid_group = False break cur_group = group group_count", "= open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line", "ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single", "# print(line) # print(Counter(map(lambda w: \"/\".join([group for group in groups if group 
in", "not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True", "False break if line.count(group) > 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif", "= False break cur_group = group group_count = line_count if valid_group and cur_group", "line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies:", "group_paralog[group] = [] for line in f: is_one_to_one = True is_ortho_plus_paralog = True", "line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single:", "key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close()", "paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog:", "ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out", "one_to_one: # print(line) # for line in ortho_plus_paralog: # print(line) # print(\"One to", "# print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies: # print(line) # for", "group in groups: line_count = line.count(group) if line_count == 0: continue if cur_group", "group in groups: if line.count(group) > 1: other_one_to_one = False break if other_one_to_one:", "for line in one_to_one: # print(line) # for line in ortho_plus_paralog: # print(line)", "None group_count = 0 for group in groups: line_count = line.count(group) if line_count", "# for line in one_to_one_notallspecies: # print(line) # for line in ortho_plus_paralog_notallspecies: #", "for group in groups: line_count = line.count(group) if line_count == 0: continue if", "else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group in groups: if line.count(group) >", "for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in", "group_single[group] = [] group_paralog[group] = [] for line in f: is_one_to_one = True", "if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group", "= open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out =", "Counter f = open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog =", "print(line) # for line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for", "line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for group in groups", "[] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single = {}", "in one_to_one_notallspecies: # print(line) # for line in 
ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda", "in groups: if line.count(group) > 1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line)", "# for line in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") # for", "in groups: group_single[group] = [] group_paralog[group] = [] for line in f: is_one_to_one", "groups: if group not in line: is_one_to_one = False is_ortho_plus_paralog = False break", "in line: is_one_to_one = False is_ortho_plus_paralog = False break if line.count(group) > 1:", "cur_group = group group_count = line_count if valid_group and cur_group is not None:", "line in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") # for line in", "for group in groups: group_single[group] = [] group_paralog[group] = [] for line in", "# print(Counter(map(lambda w: \"/\".join([group for group in groups if group in w]), #", "{} group_paralog = {} for group in groups: group_single[group] = [] group_paralog[group] =", "= True for group in groups: if line.count(group) > 1: other_one_to_one = False", "[] group_single = {} group_paralog = {} for group in groups: group_single[group] =", "from collections import Counter f = open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one =", "0 for group in groups: line_count = line.count(group) if line_count == 0: continue", "= 0 for group in groups: line_count = line.count(group) if line_count == 0:", "if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group = None", "line: is_one_to_one = False is_ortho_plus_paralog = False break if line.count(group) > 1: is_one_to_one", "True is_ortho_plus_paralog = True for group in groups: if group not in line:", "open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in", "is_one_to_one = True is_ortho_plus_paralog = True for group in groups: if group not", "1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group in groups: if", "group_single = {} group_paralog = {} for group in groups: group_single[group] = []", "line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog:", "one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) #", "print(\"One to One\\n\\n\") # for line in one_to_one: # print(line) # for line", "print(line) # print(Counter(map(lambda w: \"/\".join([group for group in groups if group in w]),", "group_paralog[cur_group].append(line) else: other_one_to_one = True for group in groups: if line.count(group) > 1:", "else: valid_group = True cur_group = None group_count = 0 for group in", "in groups: line_count = line.count(group) if line_count == 0: continue if cur_group is", "# print(line) # for line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group", "other_one_to_one = True for group in groups: if line.count(group) > 1: other_one_to_one =", "\"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\")", "line_count 
if valid_group and cur_group is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line)", "single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: # print(\"\\n{}", "ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for", "if group not in line: is_one_to_one = False is_ortho_plus_paralog = False break if", "one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out", "# print(\"One to One\\n\\n\") # for line in one_to_one: # print(line) # for", "group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line)", "line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group in", "other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out", "None: valid_group = False break cur_group = group group_count = line_count if valid_group", "group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in", "= [] group_paralog[group] = [] for line in f: is_one_to_one = True is_ortho_plus_paralog", "False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out =", "for group in groups: if line.count(group) > 1: other_one_to_one = False break if", "open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for", "= False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group", "groups: if line.count(group) > 1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else:", "print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\")", "print(line.rstrip()) # print(\"One to One\\n\\n\") # for line in one_to_one: # print(line) #", "single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line)", "= group group_count = line_count if valid_group and cur_group is not None: if", "One\\n\\n\") # for line in one_to_one_notallspecies: # print(line) # for line in ortho_plus_paralog_notallspecies:", "open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\",", "else: other_one_to_one = True for group in groups: if line.count(group) > 1: 
other_one_to_one", "= True for group in groups: if group not in line: is_one_to_one =", "= {} for group in groups: group_single[group] = [] group_paralog[group] = [] for", "f = open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog = []", "True cur_group = None group_count = 0 for group in groups: line_count =", "valid_group and cur_group is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line)", "ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]:", "single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{}", "paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close()", "for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: #", "line.count(group) > 1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out", "paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") #", "one_to_one_notallspecies: # print(line) # for line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w:", "> 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group", "\"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\")", "print(Counter(map(lambda w: \"/\".join([group for group in groups if group in w]), # f)))", "for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for", "for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for", "for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # #", "for line in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") # for line", "if line_count == 0: continue if cur_group is not None: valid_group = False", "ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line", "print(line) # print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies: # print(line) #", "= open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for", "if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\")", 
"ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line)", "print(line) # for line in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") #", "valid_group = True cur_group = None group_count = 0 for group in groups:", "is_ortho_plus_paralog = True for group in groups: if group not in line: is_one_to_one", "import sys from collections import Counter f = open(sys.argv[1], \"r\") groups = sys.argv[2:]", "False break cur_group = group group_count = line_count if valid_group and cur_group is", "cur_group = None group_count = 0 for group in groups: line_count = line.count(group)", "one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line", "False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group =", "if valid_group and cur_group is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else:", "groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies", "= sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies =", "one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog = {} for", "line in one_to_one_notallspecies: # print(line) # for line in ortho_plus_paralog_notallspecies: # print(line) #", "group in groups: if group not in line: is_one_to_one = False is_ortho_plus_paralog =", "not in line: is_one_to_one = False is_ortho_plus_paralog = False break if line.count(group) >", "in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close()", "one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out", "open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for line", "\"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for line in", "= [] group_single = {} group_paralog = {} for group in groups: group_single[group]", "group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group in groups: if line.count(group)", "in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies:", "# print(line) # print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies: # print(line)", "ortho_plus_paralog_notallspecies = [] group_single = {} group_paralog = {} for group in groups:", "not None: valid_group = False break cur_group = group group_count = line_count if", "One\\n\\n\") # for line in one_to_one: # print(line) # for line in ortho_plus_paralog:", "one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out =", "in 
ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for group in groups if", "ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies: #", "[] group_paralog[group] = [] for line in f: is_one_to_one = True is_ortho_plus_paralog =", "break cur_group = group group_count = line_count if valid_group and cur_group is not", "paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: #", "= True is_ortho_plus_paralog = True for group in groups: if group not in", "is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one =", "single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close()", "# print(line.rstrip()) # print(\"One to One\\n\\n\") # for line in one_to_one: # print(line)", "True for group in groups: if group not in line: is_one_to_one = False", "1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group =", "None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for", "= False break if line.count(group) > 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line)", "for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close()", "line in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in", "valid_group = False break cur_group = group group_count = line_count if valid_group and", "one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for", "group in groups: group_single[group] = [] group_paralog[group] = [] for line in f:", "= [] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single =", "ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key))", "in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line)", "group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key in group_paralog:", "for group in groups: if group not in line: is_one_to_one = False is_ortho_plus_paralog", "one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = [] group_single", "in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key in", "sys from collections import Counter f = open(sys.argv[1], \"r\") groups = 
sys.argv[2:] one_to_one", "is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True", "# for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]:", "sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies = [] ortho_plus_paralog_notallspecies = []", "in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") # for line in one_to_one:", "open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\",", "line.count(group) > 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else:", "groups: line_count = line.count(group) if line_count == 0: continue if cur_group is not", "to One\\n\\n\") # for line in one_to_one_notallspecies: # print(line) # for line in", "else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\",", "group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close()", "ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for", "f: is_one_to_one = True is_ortho_plus_paralog = True for group in groups: if group", "for line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for group in", "= line_count if valid_group and cur_group is not None: if line.count(cur_group) == 1:", "for line in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line", "= True cur_group = None group_count = 0 for group in groups: line_count", "0: continue if cur_group is not None: valid_group = False break cur_group =", "for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in", "key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip())", "ortho_plus_paralog.append(line) else: valid_group = True cur_group = None group_count = 0 for group", "group group_count = line_count if valid_group and cur_group is not None: if line.count(cur_group)", "line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line", "ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\")", "in one_to_one: # print(line) # for line in ortho_plus_paralog: # print(line) # print(\"One", "= False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = 
open(\"one_to_one.txt\", \"w\") ortho_plus_paralog_out", "open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\",", "# for line in one_to_one: # print(line) # for line in ortho_plus_paralog: #", "group not in line: is_one_to_one = False is_ortho_plus_paralog = False break if line.count(group)", "elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group = None group_count = 0", "for line in one_to_one_notallspecies: # print(line) # for line in ortho_plus_paralog_notallspecies: # print(line)", "= open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out =", "= open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies", "= None group_count = 0 for group in groups: line_count = line.count(group) if", "print(\"One to One\\n\\n\") # for line in one_to_one_notallspecies: # print(line) # for line", "if line.count(group) > 1: other_one_to_one = False break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line)", "to One\\n\\n\") # for line in one_to_one: # print(line) # for line in", "ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for group in groups if group", "for line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in", "collections import Counter f = open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = []", "single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key))", "in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for line in group_paralog[key]:", "line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for", "= [] for line in f: is_one_to_one = True is_ortho_plus_paralog = True for", "paralog\\n\\n\".format(key)) for line in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() #", "= {} group_paralog = {} for group in groups: group_single[group] = [] group_paralog[group]", "for line in f: is_one_to_one = True is_ortho_plus_paralog = True for group in", "if line.count(group) > 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line)", "<gh_stars>1-10 import sys from collections import Counter f = open(sys.argv[1], \"r\") groups =", "cur_group is not None: if line.count(cur_group) == 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one", "break if other_one_to_one: one_to_one_notallspecies.append(line) else: ortho_plus_paralog_notallspecies.append(line) one_to_one_out = open(\"one_to_one.txt\", \"w\") 
ortho_plus_paralog_out = open(\"ortho_plus_paralog.txt\",", "paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies:", "break if line.count(group) > 1: is_one_to_one = False if is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog:", "in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in", "group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") # for line in one_to_one: #", "open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog = [] one_to_one_notallspecies =", "False is_ortho_plus_paralog = False break if line.count(group) > 1: is_one_to_one = False if", "is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group = None group_count = 0 for", "key in group_single: single_copy_out.write(\"\\n{} single copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key", "one_to_one_out.close() one_to_one_notallspecies_out.close() ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key))", "# # for key in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in", "# print(line) # for line in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\")", "in groups: if group not in line: is_one_to_one = False is_ortho_plus_paralog = False", "in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key in group_single: single_copy_out.write(\"\\n{}", "# for line in ortho_plus_paralog_notallspecies: # print(line) # print(Counter(map(lambda w: \"/\".join([group for group", "copy\\n\\n\".format(key)) for line in group_single[key]: single_copy_out.write(line) for key in group_paralog: paralog_out.write(\"\\n{} paralog\\n\\n\".format(key)) for", "line.count(group) if line_count == 0: continue if cur_group is not None: valid_group =", "continue if cur_group is not None: valid_group = False break cur_group = group", "for line in group_paralog[key]: # print(line.rstrip()) # print(\"One to One\\n\\n\") # for line", "= open(\"ortho_plus_paralog.txt\", \"w\") one_to_one_notallspecies_out = open(\"one_to_one_notallspecies.txt\", \"w\") ortho_plus_paralog_notallspecies_out = open(\"ortho_plus_paralog_notallspecies.txt\", \"w\") single_copy_out =", "import Counter f = open(sys.argv[1], \"r\") groups = sys.argv[2:] one_to_one = [] ortho_plus_paralog", "= line.count(group) if line_count == 0: continue if cur_group is not None: valid_group", "in one_to_one: one_to_one_out.write(line) for line in one_to_one_notallspecies: one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line)", "one_to_one_notallspecies_out.write(line) for line in ortho_plus_paralog: ortho_plus_paralog_out.write(line) for line in ortho_plus_paralog_notallspecies: ortho_plus_paralog_notallspecies_out.write(line) for key", "group_count = line_count if valid_group and cur_group is not None: if line.count(cur_group) ==", "groups: group_single[group] = [] group_paralog[group] = [] for line in f: is_one_to_one =", "in group_paralog[key]: paralog_out.write(line) paralog_out.close() single_copy_out.close() one_to_one_out.close() one_to_one_notallspecies_out.close() 
ortho_plus_paralog_out.close() ortho_plus_paralog_notallspecies_out.close() # # for key", "# print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip()) # print(\"One to", "\"w\") single_copy_out = open(\"single_copy.txt\", \"w\") paralog_out = open(\"paralogs.txt\", \"w\") for line in one_to_one:", "cur_group is not None: valid_group = False break cur_group = group group_count =", "== 1: group_single[cur_group].append(line) else: group_paralog[cur_group].append(line) else: other_one_to_one = True for group in groups:", "= False is_ortho_plus_paralog = False break if line.count(group) > 1: is_one_to_one = False", "line in one_to_one: # print(line) # for line in ortho_plus_paralog: # print(line) #", "line in ortho_plus_paralog: # print(line) # print(\"One to One\\n\\n\") # for line in", "line_count == 0: continue if cur_group is not None: valid_group = False break", "{} for group in groups: group_single[group] = [] group_paralog[group] = [] for line", "in group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip()) #", "True for group in groups: if line.count(group) > 1: other_one_to_one = False break", "group_paralog: # print(\"\\n{} paralog\\n\".format(key)) # for line in group_paralog[key]: # print(line.rstrip()) # print(\"One", "[] for line in f: is_one_to_one = True is_ortho_plus_paralog = True for group", "one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group = None group_count =", "is_one_to_one: one_to_one.append(line) elif is_ortho_plus_paralog: ortho_plus_paralog.append(line) else: valid_group = True cur_group = None group_count" ]
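# A small self-contained check (tags and gene IDs invented) of the rule the
# script applies: a line is one-to-one when every group tag occurs exactly
# once, and ortholog-plus-paralog when every tag occurs but some tag is
# duplicated.
groups = ["speciesA_", "speciesB_"]
one2one = "OG1: speciesA_g1 speciesB_g2\n"
with_paralog = "OG2: speciesA_g1 speciesA_g3 speciesB_g2\n"
assert all(one2one.count(g) == 1 for g in groups)
assert all(g in with_paralog for g in groups) and with_paralog.count("speciesA_") > 1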
[ "include from . import views, api_urls urlpatterns = [ url(r'^api/', include(api_urls, namespace='api')) ]", "__future__ import absolute_import, unicode_literals from django.conf.urls import url, include from . import views,", "import absolute_import, unicode_literals from django.conf.urls import url, include from . import views, api_urls", "absolute_import, unicode_literals from django.conf.urls import url, include from . import views, api_urls urlpatterns", "from __future__ import absolute_import, unicode_literals from django.conf.urls import url, include from . import", "from django.conf.urls import url, include from . import views, api_urls urlpatterns = [", "import url, include from . import views, api_urls urlpatterns = [ url(r'^api/', include(api_urls,", "unicode_literals from django.conf.urls import url, include from . import views, api_urls urlpatterns =", "django.conf.urls import url, include from . import views, api_urls urlpatterns = [ url(r'^api/',", "url, include from . import views, api_urls urlpatterns = [ url(r'^api/', include(api_urls, namespace='api'))", "<gh_stars>0 from __future__ import absolute_import, unicode_literals from django.conf.urls import url, include from ." ]
[ "on 2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reading',", "13:24 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ]", "Generated by Django 3.0.4 on 2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration):", "= [ ('reading', '0007_auto_20200331_2133'), ] operations = [ migrations.RenameModel( old_name='ReadingListMetadata', new_name='ReadingMetadata', ), ]", "class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations = [ migrations.RenameModel( old_name='ReadingListMetadata',", "3.0.4 on 2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations =", "migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations = [ migrations.RenameModel(", "Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations = [ migrations.RenameModel( old_name='ReadingListMetadata', new_name='ReadingMetadata',", "Django 3.0.4 on 2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration): dependencies =", "by Django 3.0.4 on 2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration): dependencies", "# Generated by Django 3.0.4 on 2020-04-01 13:24 from django.db import migrations class", "dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations = [ migrations.RenameModel( old_name='ReadingListMetadata', new_name='ReadingMetadata', ),", "2020-04-01 13:24 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'),", "import migrations class Migration(migrations.Migration): dependencies = [ ('reading', '0007_auto_20200331_2133'), ] operations = [" ]
[ "self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types = {} for rec in", "https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx", "(e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone:", "self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed' %", "return raw_results['result'] page = 1 domains = [] while True: raw_results = cf.zones.get(params={'per_page':5,", "def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', )", "True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result =", "= 1 domains = [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages =", "{0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results =", "{}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for", "<11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf", "json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\"", "cfbackup \"\"\" from __future__ import print_function import sys import argparse import json import", "total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones: domains.append(z) if page", "sys.exit(\"Command \" + cmd + \" not implemened for zones\") def show(self): \"\"\"Show", "records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t", "friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\")", "subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\",", "help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\")", "def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf zones\") try: zones = self._all_zones()", "run - entry point for zones manipulations \"\"\" cmd = self._ctx.command if cmd", "https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx", "records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"],", "# print(\"Show DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones", "= zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records = [] while True:", "zones: domains.append(z) if page == 
total_pages: break page += 1 return domains COMMANDS", "\"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError", "implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf zones\") try:", "} def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.',", "CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self,", "as e: exit('/zones %d %s - api call failed' % (e, e)) if", ") total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result: records.append(rec) if", "print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0:", "print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name':", "for z in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for", "= parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true',", "= raw_results['result'] for z in zones: domains.append(z) if page == total_pages: break page", "print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\",", "raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result']", "\"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\",", "ctx def run(self): \"\"\" run - entry point for zones manipulations \"\"\" cmd", "result = raw_results['result'] for rec in result: records.append(rec) if page == total_pages: break", "NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\",", "parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\",", "parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user", "== total_pages: break page += 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\"", "in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones", "failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in", "import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for", "try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api", "print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\"", "zones\") def show(self): \"\"\"Show CF zones\"\"\" # 
print(\"Show cf zones\") try: zones =", "= subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true',", "ctx): self._ctx = ctx def run(self): \"\"\" run - entry point for zones", "in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"]))", "from __future__ import print_function import sys import argparse import json import CloudFlare #", "run - entry point for DNS records manipulations \"\"\" cmd = self._ctx.command if", "print(\"Show DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d", "(auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\")", "str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\")", "not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone: {0: <16} NS:", "CF zones\"\"\" # print(\"Show DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as", "help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns", "= raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones: domains.append(z) if page ==", "self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command \" + cmd + \"", "= ctx def run(self): \"\"\" run - entry point for zones manipulations \"\"\"", "for x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command', dest=\"object\"", "name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) args = parser.parse_args()", "z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self):", "+= 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec,", "found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records = []", "\"command\", choices=[x for x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of", "raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in", "domains.append(z) if page == total_pages: break page += 1 return domains COMMANDS =", "print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones =", "x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" )", "for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1:", "page == total_pages: break page += 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object):", "types = {} for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0", "% (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) 
return for z in zones:", "1 return domains COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = {", "t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\",", "try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api", "tool.', ) parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\", ) subparsers =", "manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx def run(self): \"\"\" run -", "types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones", "\"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def", "= CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No", "total_pages: break page += 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands", "return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def", "len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if", "{} for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] =", "[] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages']", "print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\"", "call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type =", "= cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for", "records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s -", "= [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records,", "- api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return", "output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument(", "records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1", "show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf zones\") try: zones = self._all_zones() except", "raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result: records.append(rec) if page == total_pages:", "entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x", "OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser", "= self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command \" + cmd +", "z in zones: domains.append(z) if page == total_pages: break page += 1 return", "cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': 
self._ctx.zone_name, 'per_page': 1, 'page':", "for z in zones: domains.append(z) if page == total_pages: break page += 1", "def __init__(self, ctx): self._ctx = ctx def run(self): \"\"\" run - entry point", "ctx def run(self): \"\"\" run - entry point for DNS records manipulations \"\"\"", "parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object", "return domains COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\":", "for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf zones\") try: zones", "%s - api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4))", "print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in", "types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in records_by_type[t]: #", "COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones =", "CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed' % (e, e))", "if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\")", "print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\"))", "self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result']", "user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns =", "in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in", "action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", )", "= CloudFlare.CloudFlare(raw=True) page = 1 records = [] while True: raw_results = cf_raw.zones.dns_records.get(", ") parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\",", "print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t,", "1 domains = [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages']", "module provides the main functionality of cfbackup \"\"\" from __future__ import print_function import", "except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed' % (e,", "+= 1 return domains COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT =", "else: sys.exit(\"Command \" + cmd + \" not implemened for zones\") def show(self):", "rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL:", "functionality of cfbackup \"\"\" from __future__ import print_function import sys import argparse import", ">4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': 
self._ctx.zone_name, 'per_page': 1})", "zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records =", "import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation", "zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s -", "== \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records", "\"\"\"This module provides the main functionality of cfbackup \"\"\" from __future__ import print_function", "raw_results['result'] page = 1 domains = [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page})", "\"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main", "{1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns))", "zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx def run(self): \"\"\" run", "(e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types =", "t in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name:", "result: records.append(rec) if page == total_pages: break page += 1 return records #", "cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records = [] while True: raw_results =", ") parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument(", "total_pages: break page += 1 return domains COMMANDS = [ \"show\", # \"restore\"", "records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call", "provides the main functionality of cfbackup \"\"\" from __future__ import print_function import sys", "CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple", "page = 1 records = [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100,", "of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly", "\"-z\", \"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True,", "ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name,", ") parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers(", "point for DNS records manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\":", "= 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)):", "= raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result: records.append(rec) if page ==", "page == total_pages: break page += 1 return domains COMMANDS = [ \"show\",", "cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) 
if len(zones) == 0:", "zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\",", "] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\"", "= { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser =", "cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec", "in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] +=", "e: exit('/zones %d %s - api call failed' % (e, e)) if not", "print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\"", "%d %s - api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records,", "records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec", "else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\")", "\" not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN", "\"\"\" def __init__(self, ctx): self._ctx = ctx def run(self): \"\"\" run - entry", "<16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0: <16}", "print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied:", "break page += 1 return domains COMMANDS = [ \"show\", # \"restore\" ]", "COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\":", "dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", )", "{}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\",", "command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\",", "= CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1,", "prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for x in COMMANDS],", "run(self): \"\"\" run - entry point for zones manipulations \"\"\" cmd = self._ctx.command", "print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"])", "not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\")", "rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]]", "<11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = 
CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name,", "subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional", "\" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"]))", "cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result'] page = 1", "zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) args =", "name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", )", "show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try: records = self._all_records() except", "argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones", "while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result", "+ cmd + \" not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\"", "parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\",", "{}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else", "def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page':", "- entry point for zones manipulations \"\"\" cmd = self._ctx.command if cmd ==", "records_by_type = {} types = {} for rec in records: if not records_by_type.get(rec[\"type\"]):", "'page': 1, }) return raw_results['result'] page = 1 domains = [] while True:", "\"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx def", "zones = raw_results['result'] for z in zones: domains.append(z) if page == total_pages: break", "for rec in result: records.append(rec) if page == total_pages: break page += 1", "class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx =", "\"\"\"Show CF zones\"\"\" # print(\"Show cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError", "in zones: domains.append(z) if page == total_pages: break page += 1 return domains", "= self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed'", "import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def", "return for z in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], ))", "{1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name':", "print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)):", "print(json.dumps(records, indent=4)) return records_by_type 
= {} types = {} for rec in records:", "{0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0:", "required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", )", "failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {}", "[] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in records_by_type[t]:", "raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result'] page", "%d %s - api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones,", "= cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result'] page =", "{1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\")", "= argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for x", "}) return raw_results['result'] page = 1 domains = [] while True: raw_results =", "# print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \"", "exit('No zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records", "for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf =", "import print_function import sys import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class", "indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if", "\"\"\" run - entry point for DNS records manipulations \"\"\" cmd = self._ctx.command", "in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records)))", "CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx", "zones\"\"\" # print(\"Show cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e:", "{}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", ))", "rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto:", "\" not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf", "main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument(", "help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user", "if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec)", "\" + cmd + \" not implemened for zones\") def show(self): \"\"\"Show CF", "ctx): self._ctx = ctx def run(self): \"\"\" run - 
entry point for DNS", "run(self): \"\"\" run - entry point for DNS records manipulations \"\"\" cmd =", "not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show cf zones\")", "= [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones =", "== \"show\": self.show() else: sys.exit(\"Command \" + cmd + \" not implemened for", "print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results", "_all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) ==", "\"--zone-name\", help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required", "rec in result: records.append(rec) if page == total_pages: break page += 1 return", "CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup", ") parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument(", "for zones manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else:", "backup tool.', ) parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\", ) subparsers", "Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\", )", ") subparsers = parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument(", "CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx", "\"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", help=\"optional zone name\",", ")) for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf", "sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def", "\"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\",", "for t in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"]))", "api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for", "zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try: records =", "\"show\": self.show() else: sys.exit(\"Command \" + cmd + \" not implemened for zones\")", "records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type:", "cmd = self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command \" + cmd", "return records_by_type = {} types = {} for rec in records: if not", "self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed' %", "print_function import sys import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object):", "'per_page': 1}) if len(zones) == 0: 
exit('No zones found') zone_id = zones[0]['id'] cf_raw", "= cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones:", "page += 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones", "implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try:", "print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t", "the main functionality of cfbackup \"\"\" from __future__ import print_function import sys import", "\"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command \" +", "def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try: records = self._all_records()", "\"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0:", "[ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, }", "subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show", "z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def", "{}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) == \"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"]))", "\"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly", "if page == total_pages: break page += 1 return domains COMMANDS = [", "indent=4)) return for z in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0],", "1, }) return raw_results['result'] page = 1 domains = [] while True: raw_results", "for zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx def run(self): \"\"\"", ") parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) args = parser.parse_args() OBJECT_ENTRYPOINT[args.object](args).run()", "for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"]))", "zones\"\"\" # print(\"Show DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e:", "cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones: domains.append(z)", "== total_pages: break page += 1 return domains COMMANDS = [ \"show\", #", "point for zones manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show()", "def run(self): \"\"\" run - entry point for DNS records manipulations \"\"\" cmd", ")) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11}", "+= 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones 
manipulation", "if cmd == \"show\": self.show() else: sys.exit(\"Command \" + cmd + \" not", "self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone: {0: <16} NS: {1}\".format(", "def run(self): \"\"\" run - entry point for zones manipulations \"\"\" cmd =", "total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result: records.append(rec) if page", "{} types = {} for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] =", "<11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t]))", "if len(zones) == 0: exit('No zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True)", "commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx = ctx def run(self):", "print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self):", "CloudFlare.CloudFlare(raw=True) page = 1 records = [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id,", "types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in", "help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) args", "= ctx def run(self): \"\"\" run - entry point for DNS records manipulations", "\"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare", "sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content:", "print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"],", "print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1:", "{ \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main(): \"\"\"Main entry\"\"\" parser = argparse.ArgumentParser(", "self._ctx = ctx def run(self): \"\"\" run - entry point for zones manipulations", "if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types = {} for", "'per_page': 1, 'page': 1, }) return raw_results['result'] page = 1 domains = []", "cmd == \"show\": self.show() else: sys.exit(\"Command \" + cmd + \" not implemened", "entry point for zones manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\":", ">4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare()", "zones manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command", "= 1 records = [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page},", "1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in records_by_type[t]: # print(json.dumps(rec, indent=4))", "def _all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones)", "parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\", 
help=\"optional zone", "self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result'] page = 1 domains =", "<16} {1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={", "manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command \"", "'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones: domains.append(z) if", "for DNS records manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show()", "\"\"\" from __future__ import print_function import sys import argparse import json import CloudFlare", "for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = []", "# https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx):", "main functionality of cfbackup \"\"\" from __future__ import print_function import sys import argparse", "cmd + \" not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" #", "argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for x in", "in records_by_type[t]: # print(json.dumps(rec, indent=4)) print(\"Type: {}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format(", "self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No zones found') zone_id = zones[0]['id']", "z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name:", "0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for", "zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records = [] while", "e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types = {}", "DSN records\") try: records = self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s", "zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No zones found')", "0: exit('No zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1", "= {} for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]]", "{1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf = CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page':", "e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone: {0:", "__future__ import print_function import sys import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records", "{}\".format(rec[\"type\"])) print(\"Name: {}\".format(rec[\"name\"])) print(\"Content: {}\".format(rec[\"content\"])) print(\"TTL: {}{}\".format( rec[\"ttl\"], \" (auto)\" if str(rec[\"ttl\"]) ==", "= cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No zones found') zone_id", "cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No zones found') zone_id =", "entry point for DNS records 
manipulations \"\"\" cmd = self._ctx.command if cmd ==", "== 0: exit('No zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page =", "choices=[x for x in COMMANDS], help=\"command\", ) subparsers = parser.add_subparsers( help='Object of command',", "__init__(self, ctx): self._ctx = ctx def run(self): \"\"\" run - entry point for", "zone_id, params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in", "while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for", "'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return raw_results['result'] page = 1 domains", "z in zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns", "1, 'page': 1, }) return raw_results['result'] page = 1 domains = [] while", "# https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx):", "raw_results['result'] for rec in result: records.append(rec) if page == total_pages: break page +=", "sys import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\" commands", "CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, })", "- api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return", "page += 1 return domains COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT", "CF zones\"\"\" # print(\"Show cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as", "description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for x in COMMANDS], help=\"command\",", "records = [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages", "# \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones, \"dns\": CF_DNS_Records, } def main():", "domains = [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones", "zones: print(\"Zone: {0: <16} NS: {1}\".format( z[\"name\"], z[\"name_servers\"][0], )) for ns in z[\"name_servers\"][1:]:", "stat:\") print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11}", "raw_results['result'] for z in zones: domains.append(z) if page == total_pages: break page +=", "indent=4)) return records_by_type = {} types = {} for rec in records: if", "True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result'] for z", "{1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11} {1: >4}\".format(\"Total:\", len(records))) def _all_records(self): cf =", "_all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1,", "'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result: records.append(rec)", "1}) if len(zones) == 0: exit('No zones found') zone_id = zones[0]['id'] cf_raw 
=", "print(\"Show cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d", "break page += 1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for", "not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types = {} for rec", "print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\") print(\"-------------------\") print(\"{0: <11} {1:", "\"\"\"Main entry\"\"\" parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\",", "help=\"optional zone name\", ) parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone", "page = 1 domains = [] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages", "if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z in zones: print(\"Zone: {0: <16}", "\"\"\" run - entry point for zones manipulations \"\"\" cmd = self._ctx.command if", "not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]] = 0 records_by_type[rec[\"type\"]] = [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for", "api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type", "+ \" not implemened for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show", "for zones\") def show(self): \"\"\"Show CF zones\"\"\" # print(\"Show DSN records\") try: records", "%s - api call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4))", "parser_dns = subparsers.add_parser(\"dns\") parser_dns.add_argument( \"-z\", \"--zone-name\", required=True, help=\"required zone name\", ) parser_dns.add_argument( \"--pretty\",", "DNS records manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else:", "\"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0: <11}", "import sys import argparse import json import CloudFlare # https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records class CF_DNS_Records(object): \"\"\"", "self._ctx = ctx def run(self): \"\"\" run - entry point for DNS records", "= {} types = {} for rec in records: if not records_by_type.get(rec[\"type\"]): types[rec[\"type\"]]", "= [] types[rec[\"type\"]] += 1 records_by_type[rec[\"type\"]].append(rec) for t in sorted(list(types)): for rec in", "= self._all_records() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s - api call failed'", "1 records = [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, )", "% (e, e)) if not self._ctx.pretty: print(json.dumps(records, indent=4)) return records_by_type = {} types", "of cfbackup \"\"\" from __future__ import print_function import sys import argparse import json", ">4}\".format(\"<type>\", \"<count>\")) for t in sorted(list(types)): print(\"{0: <11} {1: >4}\".format(t, types[t])) print(\"-------------------\") print(\"{0:", "len(zones) == 0: exit('No zones found') zone_id = zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page", "ns in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf = 
CloudFlare.CloudFlare(raw=True)", "call failed' % (e, e)) if not self._ctx.pretty: print(json.dumps(zones, indent=4)) return for z", "= raw_results['result'] for rec in result: records.append(rec) if page == total_pages: break page", "cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones %d %s", "1 return records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\"", "parser.add_subparsers( help='Object of command', dest=\"object\" ) parser_zones = subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show", "records # https://api.cloudflare.com/#zone-list-zones class CF_Zones(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self,", "= [] while True: raw_results = cf_raw.zones.dns_records.get( zone_id, params={'per_page':100, 'page':page}, ) total_pages =", "- entry point for DNS records manipulations \"\"\" cmd = self._ctx.command if cmd", "= subparsers.add_parser(\"zones\") parser_zones.add_argument( \"--pretty\", action='store_true', help=\"show user friendly output\", ) parser_zones.add_argument( \"-z\", \"--zone-name\",", "zones[0]['id'] cf_raw = CloudFlare.CloudFlare(raw=True) page = 1 records = [] while True: raw_results", "in result: records.append(rec) if page == total_pages: break page += 1 return records", "records manipulations \"\"\" cmd = self._ctx.command if cmd == \"show\": self.show() else: sys.exit(\"Command", "domains COMMANDS = [ \"show\", # \"restore\" ] OBJECT_ENTRYPOINT = { \"zones\": CF_Zones,", "exit('/zones %d %s - api call failed' % (e, e)) if not self._ctx.pretty:", "# print(\"Show cf zones\") try: zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError as e: exit('/zones", "\"1\" else \"\", )) print(\"Proxied: {}\".format(rec[\"proxied\"])) print(\"Auto: {}\".format(rec[\"meta\"][\"auto_added\"])) print(\"\") print(\"\") print(\"-------------------\") print(\"Records stat:\")", "records.append(rec) if page == total_pages: break page += 1 return records # https://api.cloudflare.com/#zone-list-zones", "[] while True: raw_results = cf.zones.get(params={'per_page':5, 'page':page}) total_pages = raw_results['result_info']['total_pages'] zones = raw_results['result']", "if self._ctx.zone_name: raw_results = cf.zones.get(params={ 'name': self._ctx.zone_name, 'per_page': 1, 'page': 1, }) return", "params={'per_page':100, 'page':page}, ) total_pages = raw_results['result_info']['total_pages'] result = raw_results['result'] for rec in result:", "in z[\"name_servers\"][1:]: print(\" {0: <16} {1}\".format(\"\", ns)) def _all_zones(self): cf = CloudFlare.CloudFlare(raw=True) if", "raw_results['result_info']['total_pages'] zones = raw_results['result'] for z in zones: domains.append(z) if page == total_pages:", "if page == total_pages: break page += 1 return records # https://api.cloudflare.com/#zone-list-zones class", "parser = argparse.ArgumentParser( prog=\"cfbackup\", description='Simple Cloudflare backup tool.', ) parser.add_argument( \"command\", choices=[x for", "self.show() else: sys.exit(\"Command \" + cmd + \" not implemened for zones\") def", "CloudFlare.CloudFlare() zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1}) if len(zones) == 0: exit('No zones", "class CF_DNS_Records(object): \"\"\" commands for zones manipulation \"\"\" def __init__(self, ctx): self._ctx =", "zones = self._all_zones() except CloudFlare.exceptions.CloudFlareAPIError 
as e: exit('/zones %d %s - api call" ]
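The fragment stops before the code that consumes this parser. As a minimal sketch of how the parsed arguments would be routed through OBJECT_ENTRYPOINT (the ctx and entry names and the __main__ guard are my assumptions, not recovered source):

# Sketch only: dispatch the parsed CLI arguments to the handler class
# registered for the requested object (e.g. `cfbackup show zones --pretty`).
if __name__ == "__main__":
    ctx = parser.parse_args()                  # ctx.command, ctx.object, ctx.pretty, ctx.zone_name
    entry = OBJECT_ENTRYPOINT.get(ctx.object)  # CF_Zones or CF_DNS_Records
    if entry is None:
        sys.exit("Unknown object: %s" % ctx.object)
    entry(ctx).run()

Each handler class takes the parsed namespace as its ctx, so run() can branch on ctx.command exactly as the methods above expect.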
[ "JSON and first paint + first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint", "as chromeOptions import sys from datetime import datetime import hashlib import uuid import", "'first-paint') { resultJson.firstPaint = pJson.startTime; } else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint", "driver = create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page) # insert page", "paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint = 0; try {", "'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') #", "'error') file_elements = ('pep', 'run') # retrieve input params try: protocol = sys.argv[1]", "= ('pep', 'run') # retrieve input params try: protocol = sys.argv[1] server =", "performance elements to extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete',", "= datetime.now() performance_metrics = get_page_performance_metrics(driver, page) # insert page into database if 'error'", "(pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e) {} return", "f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w') new", "// Get performance and paint entries var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries =", "get_page_performance_metrics(driver, page) # insert page into database if 'error' not in performance_metrics: #", "timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global local_csvfile file_path =", "server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming'] = cache_warming performance['error'] = error", "sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol,", "= timestamp performance['cacheWarming'] = cache_warming performance['error'] = error values = file_elements_values.copy() for m_e", "'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd',", "if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e) {}", "new == True: headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp,", "of file elements does not match\") sys.exit(1) # Chrome options chrome_options = chromeOptions()", "cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server performance['domain'] = page performance['timestamp'] =", "delimiter=';') if new == True: headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page,", "resultJson.firstContentfulPaint = 0; try { for (var i=0; i<paintEntries.length; i++) { var pJson", "driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def perform_page_load(page, cache_warming=0):", "base64 > \"fingerprints.txt\" 
chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script", "page into database if 'error' not in performance_metrics: # Print page source #", "return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script = \"\"\" // Get performance and", "chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests')", "paintEntries[1]; // Get the JSON and first paint + first contentful paint var", "# print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k", "global local_csvfile file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path):", "str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver,", "csv # performance elements to extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd',", "(var i=0; i<paintEntries.length; i++) { var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint')", "get_page_performance_metrics(driver, page): script = \"\"\" // Get performance and paint entries var perfEntries", "csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server", "os import csv # performance elements to extract measurement_elements = ('protocol', 'server', 'domain',", "driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as", "'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint',", "-pubkey < \"pubkey.pem\" | openssl pkey -pubin -outform der | openssl dgst -sha256", "k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global", "= paintEntries[1]; // Get the JSON and first paint + first contentful paint", "Original script: https://github.com/Lucapaulo/web-performance/blob/main/run_measurements.py import re import time import selenium.common.exceptions from selenium import webdriver", "True: headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''):", "import sys from datetime import datetime import hashlib import uuid import os import", "protocol = sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3] 
output_dir = sys.argv[4] file_elements_values", "var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry = paintEntries[0]; var", "if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose')", "= sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver,", "performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry = paintEntries[0];", "# insert page into database if 'error' not in performance_metrics: # Print page", "into database if 'error' not in performance_metrics: # Print page source # print(driver.page_source)", "== 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile =", "-sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def", "match\") sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol", "chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create:", "def get_page_performance_metrics(driver, page): script = \"\"\" // Get performance and paint entries var", "chrome_path = sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params", "driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver =", "protocol performance['server'] = server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming'] = cache_warming", "-pubin -outform der | openssl dgst -sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=')", "import re import time import selenium.common.exceptions from selenium import webdriver from selenium.webdriver.chrome.options import", "= entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint = 0; try { for (var i=0;", "'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart',", "var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint 
= 0; try { for", "values = file_elements_values.copy() for m_e in measurement_elements: values.append(performance[m_e]) csvfile.writerow(values) create_measurements_table() # performance measurement", "pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; } else", "extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive',", "= cache_warming performance['error'] = error values = file_elements_values.copy() for m_e in measurement_elements: values.append(performance[m_e])", "as e: return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp =", "Get performance and paint entries var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\");", "> \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script =", "chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create: openssl", "from datetime import datetime import hashlib import uuid import os import csv #", "in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global local_csvfile", "first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint = 0;", "\"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script = \"\"\"", "if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e:", "else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w')", "import uuid import os import csv # performance elements to extract measurement_elements =", "sys.exit(1) if len(file_elements) != len(file_elements_values): print(\"Number of file elements does not match\") sys.exit(1)", "'error' not in performance_metrics: # Print page source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics,", "== 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') 
chrome_options.add_argument('--disable-http-cache') #", "'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve input params try:", "insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k in measurement_elements}, timestamp,", "} else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } }", "+ first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint =", "'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error')", "dgst -sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path)", "to create: openssl x509 -pubkey < \"pubkey.pem\" | openssl pkey -pubin -outform der", "'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve input params try: protocol", "chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create: openssl x509", "page) # insert page into database if 'error' not in performance_metrics: # Print", "i<paintEntries.length; i++) { var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint", "x509 -pubkey < \"pubkey.pem\" | openssl pkey -pubin -outform der | openssl dgst", "= ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart',", "chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\")", "driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k in measurement_elements},", "'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run')", "file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile =", "insert_performance(page, {k: 0 for k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table():", "= csv.writer(local_csvfile, delimiter=';') if new == True: headers = file_elements + measurement_elements csvfile.writerow(headers)", "= 0; resultJson.firstContentfulPaint = 0; try { for (var i=0; 
i<paintEntries.length; i++) {", "= True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new == True: headers", "# Original script: https://github.com/Lucapaulo/web-performance/blob/main/run_measurements.py import re import time import selenium.common.exceptions from selenium import", "csv.writer(local_csvfile, delimiter=';') if new == True: headers = file_elements + measurement_elements csvfile.writerow(headers) def", "(protocol, server, chrome_driver, output_dir)\") sys.exit(1) if len(file_elements) != len(file_elements_values): print(\"Number of file elements", "'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)}", "global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new == True: headers = file_elements", "params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1) if len(file_elements) != len(file_elements_values): print(\"Number of", "'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile = open(file_path,", "'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements =", "= \"\"\" // Get performance and paint entries var perfEntries = performance.getEntriesByType(\"navigation\"); var", "file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1)", "e: return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now()", "if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; } else if (pJson.name ==", "'run') # retrieve input params try: protocol = sys.argv[1] server = sys.argv[2] chrome_path", "paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; } else if (pJson.name", "else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e)", "else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def perform_page_load(page,", "'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve", "chromeOptions import sys from datetime import datetime import hashlib import uuid import os", "# Print page source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page,", "contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint = 0; try", "(pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; } else if (pJson.name == 'first-contentful-paint')", "import time import selenium.common.exceptions from selenium import webdriver from selenium.webdriver.chrome.options import Options as", "params try: protocol = sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3] output_dir =", "measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 
'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd',", "error values = file_elements_values.copy() for m_e in measurement_elements: values.append(performance[m_e]) csvfile.writerow(values) create_measurements_table() # performance", "entry = perfEntries[0]; var fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1]; // Get", "error=''): performance['protocol'] = protocol performance['server'] = server performance['domain'] = page performance['timestamp'] = timestamp", "csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new == True: headers = file_elements +", "| openssl dgst -sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return", "'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart',", "= False global local_csvfile file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv'", "script = \"\"\" // Get performance and paint entries var perfEntries = performance.getEntriesByType(\"navigation\");", "| base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page):", "print(\"Number of file elements does not match\") sys.exit(1) # Chrome options chrome_options =", "{k: 0 for k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new", "'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep',", "= server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming'] = cache_warming performance['error'] =", "\"pubkey.pem\" | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary |", "chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to", "headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol']", "def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script = \"\"\" // Get", "resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint = 0; try { for (var", "openssl x509 -pubkey < \"pubkey.pem\" | openssl pkey -pubin -outform der | openssl", "try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except 
selenium.common.exceptions.WebDriverException", "i=0; i<paintEntries.length; i++) { var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') {", "chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer')", "resultJson.firstPaint = pJson.startTime; } else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime;", "# Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic':", "measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global local_csvfile file_path", "= open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w') new = True global csvfile", "= sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params incomplete", "input params try: protocol = sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3] output_dir", "page): script = \"\"\" // Get performance and paint entries var perfEntries =", "'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart',", "{ var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime;", "performance_metrics = get_page_performance_metrics(driver, page) # insert page into database if 'error' not in", "= f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path,", "} catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}')", "-outform der | openssl dgst -sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def", "return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics", "source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for", "\"\"\" // Get performance and paint entries var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries", "perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page) #", "timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page) # insert page into database if", "cache_warming performance['error'] = error values = file_elements_values.copy() for m_e in measurement_elements: values.append(performance[m_e]) csvfile.writerow(values)", "// Get the JSON and first paint + first contentful paint var resultJson", "performance['timestamp'] = 
timestamp performance['cacheWarming'] = cache_warming performance['error'] = error values = file_elements_values.copy() for", "selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp", "database if 'error' not in performance_metrics: # Print page source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png')", "| openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64", "paintEntries[0]; var fcpEntry = paintEntries[1]; // Get the JSON and first paint +", "len(file_elements_values): print(\"Number of file elements does not match\") sys.exit(1) # Chrome options chrome_options", "('pep', 'run') # retrieve input params try: protocol = sys.argv[1] server = sys.argv[2]", "performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1];", "cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page) # insert", "= sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3] output_dir = sys.argv[4] file_elements_values =", "def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page)", "performance['server'] = server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming'] = cache_warming performance['error']", "return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return", "f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a')", "chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create: openssl x509 -pubkey < \"pubkey.pem\" | openssl", "sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1) if len(file_elements)", "= performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry =", "webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, page): script = \"\"\" // Get performance and paint", "'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart',", "new = False global local_csvfile file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else", "re import time import selenium.common.exceptions from selenium import webdriver from selenium.webdriver.chrome.options import Options", "sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol ==", "('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration',", "pkey -pubin -outform der | openssl dgst -sha256 -binary | base64 > \"fingerprints.txt\"", "elements to 
extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd',", "chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP", "sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\")", "{'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver() timestamp = datetime.now() performance_metrics =", "var fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1]; // Get the JSON and", "performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server performance['domain'] = page", "import Options as chromeOptions import sys from datetime import datetime import hashlib import", "open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w') new = True global csvfile csvfile", "error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global local_csvfile file_path = f'{output_dir}/http.csv' if", "False global local_csvfile file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if", "= protocol performance['server'] = server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming'] =", "performance['protocol'] = protocol performance['server'] = server performance['domain'] = page performance['timestamp'] = timestamp performance['cacheWarming']", "example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create: openssl x509 -pubkey < \"pubkey.pem\"", "catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else:", "openssl dgst -sha256 -binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options,", "and paint entries var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry", "Print page source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k:", "resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60)", "performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k in measurement_elements}, timestamp, cache_warming=cache_warming,", "'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize',", "try: protocol = sys.argv[1] server = 
sys.argv[2] chrome_path = sys.argv[3] output_dir = sys.argv[4]", "executable_path=chrome_path) def get_page_performance_metrics(driver, page): script = \"\"\" // Get performance and paint entries", "if len(file_elements) != len(file_elements_values): print(\"Number of file elements does not match\") sys.exit(1) #", "pJson.startTime; } } } catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol", "chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function to create: openssl x509 -pubkey <", "selenium.webdriver.chrome.options import Options as chromeOptions import sys from datetime import datetime import hashlib", "insert page into database if 'error' not in performance_metrics: # Print page source", "'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve input", "open(file_path, mode='w') new = True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new", "0; resultJson.firstContentfulPaint = 0; try { for (var i=0; i<paintEntries.length; i++) { var", "local_csvfile = open(file_path, mode='w') new = True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';')", "if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else:", "import hashlib import uuid import os import csv # performance elements to extract", "= get_page_performance_metrics(driver, page) # insert page into database if 'error' not in performance_metrics:", "import os import csv # performance elements to extract measurement_elements = ('protocol', 'server',", "new = True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new == True:", "True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if new == True: headers =", "= sys.argv[5].split(';') except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1) if", "not match\") sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if", "{} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}')", "first paint + first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0;", "and first paint + first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint =", "'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol',", "sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';')", "!= len(file_elements_values): print(\"Number of file elements does not match\") sys.exit(1) # Chrome options", "server = sys.argv[2] chrome_path = sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';') except", "= paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; } else if", "var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint = pJson.startTime; 
}", "from selenium import webdriver from selenium.webdriver.chrome.options import Options as chromeOptions import sys from", "from selenium.webdriver.chrome.options import Options as chromeOptions import sys from datetime import datetime import", "file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol", "sys.argv[2] chrome_path = sys.argv[3] output_dir = sys.argv[4] file_elements_values = sys.argv[5].split(';') except IndexError: print(\"Input", "= perfEntries[0]; var fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1]; // Get the", "for k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False", "options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443')", "= create_driver() timestamp = datetime.now() performance_metrics = get_page_performance_metrics(driver, page) # insert page into", "= open(file_path, mode='w') new = True global csvfile csvfile = csv.writer(local_csvfile, delimiter=';') if", "uuid import os import csv # performance elements to extract measurement_elements = ('protocol',", "cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new = False global local_csvfile file_path = f'{output_dir}/http.csv'", "print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k in", "def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server performance['domain']", "i++) { var pJson = paintEntries[i].toJSON(); if (pJson.name == 'first-paint') { resultJson.firstPaint =", "'firstPaint', 'firstContentfulPaint', 'nextHopProtocol', 'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve input params", "protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return", "< \"pubkey.pem\" | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary", "'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache') # Function", "= pJson.startTime; } else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; }", "if new == True: headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance,", "if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w') new =", "0; try { for (var i=0; i<paintEntries.length; i++) { var 
pJson = paintEntries[i].toJSON();", "= pJson.startTime; } } } catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if", "var entry = perfEntries[0]; var fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1]; //", "does not match\") sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage')", "timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server performance['domain'] = page performance['timestamp']", "== True: headers = file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0,", "elements does not match\") sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless')", "resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script)", "paint + first contentful paint var resultJson = entry.toJSON(); resultJson.firstPaint = 0; resultJson.firstContentfulPaint", "var fcpEntry = paintEntries[1]; // Get the JSON and first paint + first", "chrome_options.add_argument('--disable-http-cache') # Function to create: openssl x509 -pubkey < \"pubkey.pem\" | openssl pkey", "timestamp, cache_warming=cache_warming) else: insert_performance(page, {k: 0 for k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error'])", "chrome_driver, output_dir)\") sys.exit(1) if len(file_elements) != len(file_elements_values): print(\"Number of file elements does not", "# retrieve input params try: protocol = sys.argv[1] server = sys.argv[2] chrome_path =", "fcpEntry = paintEntries[1]; // Get the JSON and first paint + first contentful", "== 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e) {} return resultJson;", "'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint',", "selenium.common.exceptions from selenium import webdriver from selenium.webdriver.chrome.options import Options as chromeOptions import sys", "IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1) if len(file_elements) != len(file_elements_values):", "} } catch(e) {} return resultJson; \"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic':", "'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd',", "= file_elements + measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] =", "datetime import hashlib import uuid import os import csv # performance elements to", "Function to create: openssl x509 -pubkey < \"pubkey.pem\" | openssl pkey -pubin -outform", "var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var", "file_elements_values[0] == 'false' else 
f'{output_dir}/http_pep.csv' if os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile", "csvfile = csv.writer(local_csvfile, delimiter=';') if new == True: headers = file_elements + measurement_elements", "-binary | base64 > \"fingerprints.txt\" chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver,", "# performance elements to extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart',", "import webdriver from selenium.webdriver.chrome.options import Options as chromeOptions import sys from datetime import", "fpEntry = paintEntries[0]; var fcpEntry = paintEntries[1]; // Get the JSON and first", "import csv # performance elements to extract measurement_elements = ('protocol', 'server', 'domain', 'timestamp',", "the JSON and first paint + first contentful paint var resultJson = entry.toJSON();", "output_dir)\") sys.exit(1) if len(file_elements) != len(file_elements_values): print(\"Number of file elements does not match\")", "measurement_elements csvfile.writerow(headers) def insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] =", "0 for k in measurement_elements}, timestamp, cache_warming=cache_warming, error=performance_metrics['error']) driver.quit() def create_measurements_table(): new =", "# Function to create: openssl x509 -pubkey < \"pubkey.pem\" | openssl pkey -pubin", "chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-dev-shm-usage') if protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com", "perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry", "except IndexError: print(\"Input params incomplete (protocol, server, chrome_driver, output_dir)\") sys.exit(1) if len(file_elements) !=", "selenium import webdriver from selenium.webdriver.chrome.options import Options as chromeOptions import sys from datetime", "len(file_elements) != len(file_elements_values): print(\"Number of file elements does not match\") sys.exit(1) # Chrome", "<gh_stars>0 # Original script: https://github.com/Lucapaulo/web-performance/blob/main/run_measurements.py import re import time import selenium.common.exceptions from selenium", "except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def perform_page_load(page, cache_warming=0): driver = create_driver()", "paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0]; var fpEntry = paintEntries[0]; var fcpEntry", "webdriver from selenium.webdriver.chrome.options import Options as chromeOptions import sys from datetime import datetime", "chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=') def create_driver(): return webdriver.Chrome(options=chrome_options, executable_path=chrome_path) def get_page_performance_metrics(driver, 
page): script = \"\"\" //", "'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize', 'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart', 'secureConnectionStart', 'startTime',", "== 'first-paint') { resultJson.firstPaint = pJson.startTime; } else if (pJson.name == 'first-contentful-paint') {", "pJson.startTime; } else if (pJson.name == 'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } }", "== 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error':", "os.path.isfile(file_path): local_csvfile = open(file_path, mode='a') else: local_csvfile = open(file_path, mode='w') new = True", "insert_performance(page, performance, timestamp, cache_warming=0, error=''): performance['protocol'] = protocol performance['server'] = server performance['domain'] =", "not in performance_metrics: # Print page source # print(driver.page_source) driver.save_screenshot(f'{output_dir}/screenshot.png') insert_performance(page, performance_metrics, timestamp,", "entries var perfEntries = performance.getEntriesByType(\"navigation\"); var paintEntries = performance.getEntriesByType(\"paint\"); var entry = perfEntries[0];", "protocol == 'quic': chrome_options.add_argument('--enable-quic') chrome_options.add_argument('--origin-to-force-quic-on=example.com:443') chrome_options.add_argument('--allow_unknown_root_cer') chrome_options.add_argument('--disable_certificate_verification') chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") chrome_options.add_argument('--verbose') chrome_options.add_argument('--disable-http-cache')", "retrieve input params try: protocol = sys.argv[1] server = sys.argv[2] chrome_path = sys.argv[3]", "file_elements = ('pep', 'run') # retrieve input params try: protocol = sys.argv[1] server", "'first-contentful-paint') { resultJson.firstContentfulPaint = pJson.startTime; } } } catch(e) {} return resultJson; \"\"\"", "file elements does not match\") sys.exit(1) # Chrome options chrome_options = chromeOptions() chrome_options.add_argument('--no-sandbox')", "\"\"\" try: driver.set_page_load_timeout(60) if protocol == 'quic': driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except", "= file_elements_values.copy() for m_e in measurement_elements: values.append(performance[m_e]) csvfile.writerow(values) create_measurements_table() # performance measurement perform_page_load(\"example.com\")", "driver.get(f'https://{page}') else: driver.get(f'http://{page}') return driver.execute_script(script) except selenium.common.exceptions.WebDriverException as e: return {'error': str(e)} def", "'timestamp', 'connectEnd', 'connectStart', 'domComplete', 'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd', 'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize',", "performance['cacheWarming'] = cache_warming performance['error'] = error values = file_elements_values.copy() for m_e in measurement_elements:", "'cacheWarming', 'error') file_elements = ('pep', 'run') # retrieve input params try: protocol =", "chrome_options.add_argument('--ignore-urlfetcher-cert-requests') chrome_options.add_argument(f\"--host-resolver-rules=MAP example.com {server}\") 
# … script: https://github.com/Lucapaulo/web-performance/blob/main/run_measurements.py
import re
import time
import selenium.common.exceptions
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as chromeOptions
import sys
from datetime import datetime
import hashlib
import uuid
import os
import csv

# performance elements to extract
measurement_elements = ('protocol', 'server', 'domain', 'timestamp', 'connectEnd', 'connectStart', 'domComplete',
                        'domContentLoadedEventEnd', 'domContentLoadedEventStart', 'domInteractive', 'domainLookupEnd',
                        'domainLookupStart', 'duration', 'encodedBodySize', 'decodedBodySize', 'transferSize',
                        'fetchStart', 'loadEventEnd', 'loadEventStart', 'requestStart', 'responseEnd', 'responseStart',
                        'secureConnectionStart', 'startTime', 'firstPaint', 'firstContentfulPaint', 'nextHopProtocol',
                        'cacheWarming', 'error')

file_elements = ()  # assumption: the tuple contents are truncated in this excerpt

try:
    protocol = sys.argv[1]  # assumption: argv[1] handling is truncated; the error message below lists protocol first
    server = sys.argv[2]    # assumption: the name left of "= sys.argv[2]" is truncated
    chrome_path = sys.argv[3]
    output_dir = sys.argv[4]
    file_elements_values = sys.argv[5].split(';')
except IndexError:
    print("Input params incomplete (protocol, server, chrome_driver, output_dir)")
    sys.exit(1)

if len(file_elements) != len(file_elements_values):
    print("Number of file elements does not match")  # assumption: tail of the message is truncated
    sys.exit(1)  # assumption

# Chrome options
chrome_options = chromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
if protocol == 'quic':
    chrome_options.add_argument('--enable-quic')
    chrome_options.add_argument('--origin-to-force-quic-on=example.com:443')
    chrome_options.add_argument('--allow_unknown_root_cer')
    chrome_options.add_argument('--disable_certificate_verification')
    # … one more add_argument is truncated here; it ends with an f-string interpolating {server}
chrome_options.add_argument('--verbose')
chrome_options.add_argument('--disable-http-cache')
# Function to create: openssl x509 -pubkey < "pubkey.pem" | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64 > "fingerprints.txt"
chrome_options.add_argument('--ignore-certificate-errors-spki-list=D29LAH0IMcLx/d7R2JAH5bw/YKYK9uNRYc6W0/GJlS8=')


def create_driver():
    return webdriver.Chrome(options=chrome_options, executable_path=chrome_path)


def get_page_performance_metrics(driver, page):
    script = """
        // Get performance and paint entries
        var perfEntries = performance.getEntriesByType("navigation");
        var paintEntries = performance.getEntriesByType("paint");
        var entry = perfEntries[0];
        var fpEntry = paintEntries[0];
        var fcpEntry = paintEntries[1];
        // Get the JSON and first paint + first contentful paint
        var resultJson = entry.toJSON();
        resultJson.firstPaint = 0;
        resultJson.firstContentfulPaint = 0;
        try {
            for (var i=0; i<paintEntries.length; i++) {
                var pJson = paintEntries[i].toJSON();
                if (pJson.name == 'first-paint') {
                    resultJson.firstPaint = pJson.startTime;
                } else if (pJson.name == 'first-contentful-paint') {
                    resultJson.firstContentfulPaint = pJson.startTime;
                }
            }
        } catch(e) {}
        return resultJson;
    """
    try:
        driver.set_page_load_timeout(60)
        # … the original branches on `if protocol == ...` before fetching the page (truncated)
        driver.get(f'https://{page}')  # assumption: a plain page load stands in for the truncated branch
        return driver.execute_script(script)
    except selenium.common.exceptions.WebDriverException as e:
        return {'error': str(e)}


def perform_page_load(page, cache_warming=0):
    driver = create_driver()
    timestamp = datetime.now()
    performance_metrics = get_page_performance_metrics(driver, page)
    # insert page into database
    if 'error' not in performance_metrics:
        # Print page source
        # print(driver.page_source)
        driver.save_screenshot(f'{output_dir}/screenshot.png')
        insert_performance(page, performance_metrics, timestamp, cache_warming=cache_warming)
    else:
        insert_performance(page, {k: 0 for k in measurement_elements}, timestamp,
                           cache_warming=cache_warming, error=performance_metrics['error'])
    driver.quit()


def create_measurements_table():
    new = False
    global local_csvfile
    file_path = f'{output_dir}/http.csv' if file_elements_values[0] == 'false' else f'{output_dir}/http_pep.csv'
    if os.path.isfile(file_path):
        local_csvfile = open(file_path, mode='a')
    else:
        local_csvfile = open(file_path, mode='w')
        new = True
    global csvfile
    csvfile = csv.writer(local_csvfile, delimiter=';')
    if new == True:  # the excerpt truncates after "if new =="; assuming headers are written for a fresh file
        headers = file_elements + measurement_elements  # assumption: only "+ measurement_elements" survives
        csvfile.writerow(headers)


def insert_performance(page, performance, timestamp, cache_warming=0, error=''):
    performance['protocol'] = protocol
    performance['server'] = server  # assumption: the right-hand side is truncated
    performance['domain'] = page
    performance['timestamp'] = timestamp
    performance['cacheWarming'] = cache_warming
    performance['error'] = error
    values = file_elements_values.copy()
    for m_e in measurement_elements:
        values.append(performance[m_e])
    csvfile.writerow(values)


create_measurements_table()
# performance measurement
perform_page_load("example.com")
local_csvfile.close()
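For reference, the headline numbers can be read straight off the extracted Navigation Timing fields. A minimal sketch (field names as in measurement_elements above; these helpers are not part of the original script):

def page_load_time(metrics):
    # Navigation Timing values are milliseconds relative to navigation start,
    # so loadEventEnd minus startTime is the full page load time.
    return metrics['loadEventEnd'] - metrics['startTime']

def render_milestones(metrics):
    # first-paint / first-contentful-paint as collected by the injected JS above
    return metrics['firstPaint'], metrics['firstContentfulPaint']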
[ "(C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/ This plugin provides access to", "the terms of the GNU General Public License as published by * *", "from qgis.core import QgsApplication from .gui import OhsomeToolsDialog from .proc import provider class", "class which provides the hook by which you can manipulate the QGIS application", "# noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An interface instance that", "Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * * *", "ohsome API ------------------- begin : 2021-05-01 git sha : $Format:%H$ copyright : (C)", "developed and maintained by the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg,", "* This program is free software; you can redistribute it and/or modify *", "will be passed to this class which provides the hook by which you", "API ------------------- begin : 2021-05-01 git sha : $Format:%H$ copyright : (C) 2021", "<NAME> email : <EMAIL> ***************************************************************************/ This plugin provides access to the ohsome API", "maintained by the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. /***************************************************************************", "Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An interface", "passed to this class which provides the hook by which you can manipulate", "can manipulate the QGIS application at run time. :type iface: QgsInterface \"\"\" self.dialog", "* * (at your option) any later version. * * * ***************************************************************************/ \"\"\"", "it under the terms of the GNU General Public License as published by", "noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An interface instance that will", "HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * * * This program is free software;", "provides access to the ohsome API (https://api.ohsome.org), developed and maintained by the Heidelberg", "redistribute it and/or modify * * it under the terms of the GNU", "iface: An interface instance that will be passed to this class which provides", "import QgsApplication from .gui import OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS", "toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry", "time. :type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self):", "Heidelberg, Germany. 
/*************************************************************************** * * * This program is free software; you can", "Software Foundation; either version 2 of the License, or * * (at your", "which provides the hook by which you can manipulate the QGIS application at", "program is free software; you can redistribute it and/or modify * * it", "A QGIS plugin QGIS client to query the ohsome API ------------------- begin :", "License as published by * * the Free Software Foundation; either version 2", "***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui import OhsomeToolsDialog from .proc import", "run time. :type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def", "/*************************************************************************** * * * This program is free software; you can redistribute it", "application at run time. :type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider =", "that will be passed to this class which provides the hook by which", "the QGIS application at run time. :type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface)", "import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface):", "(https://api.ohsome.org), developed and maintained by the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH,", "coding: utf-8 -*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client to query", "2 of the License, or * * (at your option) any later version.", "you can redistribute it and/or modify * * it under the terms of", "the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * *", ":type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create", "and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu", "iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the", "QGIS client to query the ohsome API ------------------- begin : 2021-05-01 git sha", ": (C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/ This plugin provides access", "provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor.", "free software; you can redistribute it and/or modify * * it under the", "<reponame>GIScience/ohsome-qgis-plugin # -*- coding: utf-8 -*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS", "__init__(self, iface): \"\"\"Constructor. 
:param iface: An interface instance that will be passed to", "self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and toolbar icons inside", "initGui(self): \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider)", ": <EMAIL> ***************************************************************************/ This plugin provides access to the ohsome API (https://api.ohsome.org), developed", "OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass", "* * * This program is free software; you can redistribute it and/or", "sha : $Format:%H$ copyright : (C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/", "* * This program is free software; you can redistribute it and/or modify", "* * * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui import OhsomeToolsDialog", "this class which provides the hook by which you can manipulate the QGIS", "qgis.core import QgsApplication from .gui import OhsomeToolsDialog from .proc import provider class OhsomeTools:", "Technology, HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * * * This program is free", "\"\"\" from qgis.core import QgsApplication from .gui import OhsomeToolsDialog from .proc import provider", "------------------- begin : 2021-05-01 git sha : $Format:%H$ copyright : (C) 2021 by", "An interface instance that will be passed to this class which provides the", "version 2 of the License, or * * (at your option) any later", "by which you can manipulate the QGIS application at run time. :type iface:", ": 2021-05-01 git sha : $Format:%H$ copyright : (C) 2021 by <NAME> email", "by * * the Free Software Foundation; either version 2 of the License,", "def __init__(self, iface): \"\"\"Constructor. :param iface: An interface instance that will be passed", "manipulate the QGIS application at run time. :type iface: QgsInterface \"\"\" self.dialog =", "OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and toolbar icons", "the License, or * * (at your option) any later version. * *", "of the License, or * * (at your option) any later version. 
*", "QGIS plugin QGIS client to query the ohsome API ------------------- begin : 2021-05-01", "ohsome API (https://api.ohsome.org), developed and maintained by the Heidelberg Institute for Geoinformation Technology,", "plugin provides access to the ohsome API (https://api.ohsome.org), developed and maintained by the", "instance that will be passed to this class which provides the hook by", "icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry and", "the ohsome API ------------------- begin : 2021-05-01 git sha : $Format:%H$ copyright :", "menu entries and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self):", "QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu", "provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and toolbar icons inside the QGIS", "-*- coding: utf-8 -*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client to", "terms of the GNU General Public License as published by * * the", "later version. * * * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui", "interface instance that will be passed to this class which provides the hook", "to the ohsome API (https://api.ohsome.org), developed and maintained by the Heidelberg Institute for", "This program is free software; you can redistribute it and/or modify * *", "This plugin provides access to the ohsome API (https://api.ohsome.org), developed and maintained by", "which you can manipulate the QGIS application at run time. :type iface: QgsInterface", "the GNU General Public License as published by * * the Free Software", "git sha : $Format:%H$ copyright : (C) 2021 by <NAME> email : <EMAIL>", "version. * * * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui import", "OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface:", "QGIS application at run time. :type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider", "\"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client to query the ohsome API", "2021-05-01 git sha : $Format:%H$ copyright : (C) 2021 by <NAME> email :", "PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An interface instance that will be", "the Free Software Foundation; either version 2 of the License, or * *", "* it under the terms of the GNU General Public License as published", "at run time. 
:type iface: QgsInterface \"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider()", "be passed to this class which provides the hook by which you can", "the menu entries and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def", "client to query the ohsome API ------------------- begin : 2021-05-01 git sha :", "query the ohsome API ------------------- begin : 2021-05-01 git sha : $Format:%H$ copyright", ": $Format:%H$ copyright : (C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/ This", "it and/or modify * * it under the terms of the GNU General", "Foundation; either version 2 of the License, or * * (at your option)", "either version 2 of the License, or * * (at your option) any", "email : <EMAIL> ***************************************************************************/ This plugin provides access to the ohsome API (https://api.ohsome.org),", "from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def", "QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry and toolbar icons\"\"\" QgsApplication.processingRegistry().removeProvider(self.provider)", "as published by * * the Free Software Foundation; either version 2 of", "***************************************************************************/ This plugin provides access to the ohsome API (https://api.ohsome.org), developed and maintained", "the ohsome API (https://api.ohsome.org), developed and maintained by the Heidelberg Institute for Geoinformation", "License, or * * (at your option) any later version. * * *", "GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry and toolbar icons\"\"\" QgsApplication.processingRegistry().removeProvider(self.provider) self.dialog.unload()", "\"\"\" self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries", "Public License as published by * * the Free Software Foundation; either version", "any later version. * * * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from", "for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. 
/*************************************************************************** * * * This program", "* ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui import OhsomeToolsDialog from .proc", "and/or modify * * it under the terms of the GNU General Public", "under the terms of the GNU General Public License as published by *", "the hook by which you can manipulate the QGIS application at run time.", "/*************************************************************************** ohsomeTools A QGIS plugin QGIS client to query the ohsome API -------------------", "* * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication from .gui import OhsomeToolsDialog from", "begin : 2021-05-01 git sha : $Format:%H$ copyright : (C) 2021 by <NAME>", "and maintained by the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany.", "by <NAME> email : <EMAIL> ***************************************************************************/ This plugin provides access to the ohsome", "entries and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove", "\"\"\"Constructor. :param iface: An interface instance that will be passed to this class", "= OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and toolbar", "utf-8 -*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client to query the", "= provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and toolbar icons inside the", "gGmbH, Heidelberg, Germany. /*************************************************************************** * * * This program is free software; you", "access to the ohsome API (https://api.ohsome.org), developed and maintained by the Heidelberg Institute", "# -*- coding: utf-8 -*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client", "software; you can redistribute it and/or modify * * it under the terms", "from .gui import OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\"", "class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param", "(at your option) any later version. * * * ***************************************************************************/ \"\"\" from qgis.core", "by the Heidelberg Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. 
/*************************************************************************** *", "modify * * it under the terms of the GNU General Public License", "published by * * the Free Software Foundation; either version 2 of the", "import OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection", "copyright : (C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/ This plugin provides", "def initGui(self): \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"", "API (https://api.ohsome.org), developed and maintained by the Heidelberg Institute for Geoinformation Technology, HeiGIT", "ohsomeTools A QGIS plugin QGIS client to query the ohsome API ------------------- begin", "option) any later version. * * * ***************************************************************************/ \"\"\" from qgis.core import QgsApplication", "to this class which provides the hook by which you can manipulate the", "\"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An", "of the GNU General Public License as published by * * the Free", "Free Software Foundation; either version 2 of the License, or * * (at", "provides the hook by which you can manipulate the QGIS application at run", "you can manipulate the QGIS application at run time. :type iface: QgsInterface \"\"\"", ".gui import OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" #", "to query the ohsome API ------------------- begin : 2021-05-01 git sha : $Format:%H$", "plugin QGIS client to query the ohsome API ------------------- begin : 2021-05-01 git", "inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry and toolbar", "can redistribute it and/or modify * * it under the terms of the", "the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui() def unload(self): \"\"\"remove menu entry and toolbar icons\"\"\"", "* * it under the terms of the GNU General Public License as", "<EMAIL> ***************************************************************************/ This plugin provides access to the ohsome API (https://api.ohsome.org), developed and", "iface): \"\"\"Constructor. :param iface: An interface instance that will be passed to this", "self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface) self.provider = provider.OhsomeToolsProvider() def initGui(self): \"\"\"Create the menu entries and", "Institute for Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * * * This", "* the Free Software Foundation; either version 2 of the License, or *", "or * * (at your option) any later version. * * * ***************************************************************************/", "Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self, iface): \"\"\"Constructor. :param iface: An interface instance", "-*- \"\"\" /*************************************************************************** ohsomeTools A QGIS plugin QGIS client to query the ohsome", "Germany. 
/*************************************************************************** * * * This program is free software; you can redistribute", "$Format:%H$ copyright : (C) 2021 by <NAME> email : <EMAIL> ***************************************************************************/ This plugin", "QgsApplication from .gui import OhsomeToolsDialog from .proc import provider class OhsomeTools: \"\"\"QGIS Plugin", "2021 by <NAME> email : <EMAIL> ***************************************************************************/ This plugin provides access to the", "* * the Free Software Foundation; either version 2 of the License, or", "Geoinformation Technology, HeiGIT gGmbH, Heidelberg, Germany. /*************************************************************************** * * * This program is", "is free software; you can redistribute it and/or modify * * it under", "General Public License as published by * * the Free Software Foundation; either", "your option) any later version. * * * ***************************************************************************/ \"\"\" from qgis.core import", ".proc import provider class OhsomeTools: \"\"\"QGIS Plugin Implementation.\"\"\" # noinspection PyTypeChecker,PyArgumentList,PyCallByClass def __init__(self,", ":param iface: An interface instance that will be passed to this class which", "hook by which you can manipulate the QGIS application at run time. :type", "\"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\" QgsApplication.processingRegistry().addProvider(self.provider) self.dialog.initGui()", "* (at your option) any later version. * * * ***************************************************************************/ \"\"\" from", "GNU General Public License as published by * * the Free Software Foundation;" ]
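QGIS discovers a plugin through a module-level classFactory hook in the package __init__.py; that hook is not shown in the excerpt above, but a minimal sketch of the conventional form would be:

def classFactory(iface):  # hypothetical; the standard QGIS plugin entry point
    """Load the OhsomeTools class when QGIS asks for the plugin."""
    return OhsomeTools(iface)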
[ "== \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number = 10 sim.run_new_episode(case_number, controller_type =", "from learn_to_manipulate.simulate import Simulation import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim", "learn_to_manipulate.simulate import Simulation import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim =", "if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number = 10 sim.run_new_episode(case_number,", "import Simulation import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl')", "#!/usr/bin/env python from learn_to_manipulate.simulate import Simulation import rospy if __name__ == \"__main__\" :", "import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number =", "\"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number = 10 sim.run_new_episode(case_number, controller_type = 'learnt')", "Simulation import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number", "<filename>scripts/load_sim.py #!/usr/bin/env python from learn_to_manipulate.simulate import Simulation import rospy if __name__ == \"__main__\"", "python from learn_to_manipulate.simulate import Simulation import rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate')", "rospy if __name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number = 10", "__name__ == \"__main__\" : rospy.init_node('learn_to_manipulate') sim = Simulation.load_simulation('/home/marcrigter/2019-08-08-11-49_learnt1_key_teleop0.pkl') case_number = 10 sim.run_new_episode(case_number, controller_type" ]
[ "from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn = '枚举类型'", "unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn = '枚举类型' structure_key_items_cn = '枚举类型'", "encoding: utf-8 \"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\"", "__future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn = '枚举类型' structure_key_items_cn", "python # encoding: utf-8 \"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23", "\"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn =", "@time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items =", "\"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__", "#!/usr/bin/env python # encoding: utf-8 \"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time:", "PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item =", "15:55 \"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn", "@author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import", "enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items", "@file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item'", "import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items' structure_key_item_cn = '枚举类型' structure_key_items_cn =", "# encoding: utf-8 \"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55", "utf-8 \"\"\" @author: zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from", "2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item = 'enum_item' structure_key_items = 'enum_items'", "@software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals structure_key_item", "zhanghe @software: PyCharm @file: enum_items.py @time: 2018-08-23 15:55 \"\"\" from __future__ import unicode_literals" ]
[ "used by GOTO to do UX. \"\"\" from .text import GotoError, GotoWarning, print_text", "-*- \"\"\" Text used by GOTO to do UX. \"\"\" from .text import", "<reponame>technocake/goto # -*- coding: utf-8 -*- \"\"\" Text used by GOTO to do", "coding: utf-8 -*- \"\"\" Text used by GOTO to do UX. \"\"\" from", "\"\"\" Text used by GOTO to do UX. \"\"\" from .text import GotoError,", "Text used by GOTO to do UX. \"\"\" from .text import GotoError, GotoWarning,", "# -*- coding: utf-8 -*- \"\"\" Text used by GOTO to do UX.", "utf-8 -*- \"\"\" Text used by GOTO to do UX. \"\"\" from .text", "-*- coding: utf-8 -*- \"\"\" Text used by GOTO to do UX. \"\"\"" ]
[ "description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE], python_requires=\">=3.6,<3.9\", package_data={NAME: []},", "import os from setuptools import setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME", "\"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name)", "name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE], python_requires=\">=3.6,<3.9\",", "setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE],", "= \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path,", "NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as", "\"rt\") as fp: file_data = fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"),", "os from setuptools import setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME =", "with open(path, \"rt\") as fp: file_data = fp.read() return file_data setup( name=NAME, version=VERSION,", "path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data = fp.read() return", "def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data =", "file_name) with open(path, \"rt\") as fp: file_data = fp.read() return file_data setup( name=NAME,", "fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\",", "long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE], python_requires=\">=3.6,<3.9\", package_data={NAME: []}, install_requires=[\"setuptools\"], )", "\"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data", "as fp: file_data = fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\",", "return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", 
author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True,", "file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME],", "= \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp:", "VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path =", "local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data = fp.read()", "= fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\",", "file_data = fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\",", "fp: file_data = fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore", "setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path", "long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE], python_requires=\">=3.6,<3.9\", package_data={NAME: []}, install_requires=[\"setuptools\"],", "from setuptools import setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE)", "NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with", "setuptools import setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def", "\"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\")", "version=VERSION, description=\"JSONEncoder\", long_description=local_text_file(\"README.md\"), long_description_content_type=\"text/markdown\", author=\"NewStore Inc.\", author_email=\"<EMAIL>\", url=\"https://github.com/NewStore-oss/json-encoder\", zip_safe=True, packages=[NAME], namespace_packages=[NAMESPACE], python_requires=\">=3.6,<3.9\", package_data={NAME:", "import setup VERSION = \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name):", "open(path, \"rt\") as fp: file_data = fp.read() return file_data setup( name=NAME, version=VERSION, description=\"JSONEncoder\",", "os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data = fp.read() return file_data setup(", "= os.path.join(os.path.dirname(__file__), file_name) with open(path, \"rt\") as fp: file_data = fp.read() return 
file_data", "= \"1.0.4\" NAMESPACE = \"newstore\" NAME = \"{}.json_encoder\".format(NAMESPACE) def local_text_file(file_name): path = os.path.join(os.path.dirname(__file__)," ]
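Because packages=[NAME] registers the dotted namespace name, consumers import the module under newstore.json_encoder; a trivial check of the value assembled above (the consumer-facing API itself is not shown in this excerpt):

NAMESPACE = "newstore"
NAME = "{}.json_encoder".format(NAMESPACE)
assert NAME == "newstore.json_encoder"  # i.e. `from newstore import json_encoder` once installed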
[ "**kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key,", "_deco_draw_queue(func): \"\"\" Append one order to dict for this func call. Creates a", "to dict for this func call. Creates a key with id of Part", "This allows us to manually call draw_create(draw_now=True) after instantiating a Page instead of", "dict for this func call. Creates a key with id of Part and", "an old order then it's removed. Returns key unless draw_now is True. \"\"\"", "like this to skip queue instead of drawing instantly # if sigInfo[\"draw_now\"]: #", "= methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us", "is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper =", "= methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to manually call", "PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called by draw_create which", "to Page. sigInfo.call() else: orders[key] = sigInfo return key # Could possibly do", "def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders =", "something like this to skip queue instead of drawing instantly # if sigInfo[\"draw_now\"]:", "part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is", "call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page. sigInfo.call()", "generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper", "Creates a key with id of Part and func's name. If key exists", "parent: \"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\"", "of Part and func's name. If key exists as an old order then", "= sigInfo return key # Could possibly do something like this to skip", "def _deco_draw_queue(func): \"\"\" Append one order to dict for this func call. Creates", "generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True,", "Could possibly do something like this to skip queue instead of drawing instantly", "is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called", "_draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or", "key # Could possibly do something like this to skip queue instead of", "kwargs): \"\"\" Used to decouple properties, called by draw_create which is called by", "sigInfo.call() else: orders[key] = sigInfo return key # Could possibly do something like", "SigInfo, dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper", "if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after", "import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self:", "old order then it's removed. Returns key unless draw_now is True. 
\"\"\" def", "_draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for part", "orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after instantiating a", "by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is packed.", "draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one", "it's removed. Returns key unless draw_now is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo", "if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: # orders[key] = sigInfo return", "# This allows us to manually call draw_create(draw_now=True) after instantiating a Page instead", "and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\" def", "to manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to", "func call. Creates a key with id of Part and func's name. If", "getBaseClassNames(parent) or parent is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to", "Part and func's name. If key exists as an old order then it's", "and func's name. If key exists as an old order then it's removed.", "# dict_insert(orders, **{key: sigInfo}) # else: # orders[key] = sigInfo return wrapper_transfer(func, _wrapper)", "parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for", "draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called by draw_create which is called", "draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page. sigInfo.call() else:", "by draw_create which is called by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\"", "class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called by draw_create", "sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This", "True. \"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"]", "getBaseClassNames, SigInfo, dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param", "Page instead of passing draw_now to Page. sigInfo.call() else: orders[key] = sigInfo return", "gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is None class PartBaseClass: def", "\"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\"", "is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one order to dict for this", "to skip queue instead of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key:", "of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: #", "self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is None class", "draw_now to Page. sigInfo.call() else: orders[key] = sigInfo return key # Could possibly", "Returns key unless draw_now is True. 
\"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func,", "generalgui.MethodGrouper parent: \"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert", "for this func call. Creates a key with id of Part and func's", "Called after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one order to", "with id of Part and func's name. If key exists as an old", "orders[key] = sigInfo return key # Could possibly do something like this to", "allows us to manually call draw_create(draw_now=True) after instantiating a Page instead of passing", "from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param", "include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is None class PartBaseClass:", "\"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in", "sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after instantiating", "manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page.", "func's name. If key exists as an old order then it's removed. Returns", ":param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for part in self.get_children(depth=-1,", "wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\"", "called by draw_create which is called by init and set_parent. \"\"\" def draw_create_post_hook(self):", "draw_create which is called by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called", "unless draw_now is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs)", "a Page instead of passing draw_now to Page. sigInfo.call() else: orders[key] = sigInfo", "sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key =", "a key with id of Part and func's name. If key exists as", "assert \"Contain\" in getBaseClassNames(parent) or parent is None class PartBaseClass: def draw_create_hook(self, kwargs):", "is called by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget", "then it's removed. Returns key unless draw_now is True. \"\"\" def _wrapper(*args, **kwargs):", "\"\"\" def _deco_draw_queue(func): \"\"\" Append one order to dict for this func call.", "draw_now is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper", "\"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for part in", "SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if", "*args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]:", "passing draw_now to Page. 
sigInfo.call() else: orders[key] = sigInfo return key # Could", "or parent is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple", "methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None)", "key exists as an old order then it's removed. Returns key unless draw_now", "else: orders[key] = sigInfo return key # Could possibly do something like this", "sigInfo return key # Could possibly do something like this to skip queue", "part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is None class PartBaseClass: def draw_create_hook(self,", "to decouple properties, called by draw_create which is called by init and set_parent.", "widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one order to dict for", "removed. Returns key unless draw_now is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo =", "instantiating a Page instead of passing draw_now to Page. sigInfo.call() else: orders[key] =", "self: :param generalgui.MethodGrouper parent: \"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True):", "dict_insert, wrapper_transfer def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent:", "\"\"\" Append one order to dict for this func call. Creates a key", "init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\"", "id of Part and func's name. If key exists as an old order", "set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\" def _deco_draw_queue(func):", "of passing draw_now to Page. sigInfo.call() else: orders[key] = sigInfo return key #", "order then it's removed. Returns key unless draw_now is True. \"\"\" def _wrapper(*args,", "after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one order to dict", "properties, called by draw_create which is called by init and set_parent. \"\"\" def", "= SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func)", "drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: # orders[key]", "Page. sigInfo.call() else: orders[key] = sigInfo return key # Could possibly do something", "methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True)", "instead of passing draw_now to Page. sigInfo.call() else: orders[key] = sigInfo return key", "name. If key exists as an old order then it's removed. Returns key", "def set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if", "called by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after widget is", "which is called by init and set_parent. \"\"\" def draw_create_post_hook(self): \"\"\" Called after", "this func call. Creates a key with id of Part and func's name.", "decouple properties, called by draw_create which is called by init and set_parent. 
\"\"\"", ":param generalgui.MethodGrouper parent: \"\"\" if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create()", "set_parent_hook(self, parent, _draw=True): \"\"\" :param generalgui.MethodGrouper self: :param generalgui.MethodGrouper parent: \"\"\" if _draw:", "instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: # orders[key] =", "key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to manually", "do something like this to skip queue instead of drawing instantly # if", "\"Contain\" in getBaseClassNames(parent) or parent is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\"", "exists as an old order then it's removed. Returns key unless draw_now is", "sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: # orders[key] = sigInfo return wrapper_transfer(func,", "return key # Could possibly do something like this to skip queue instead", "order to dict for this func call. Creates a key with id of", "us to manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now", "instead of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else:", "None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called by", "possibly do something like this to skip queue instead of drawing instantly #", "parent is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties,", "in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent is None", "**kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders key", "\"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders", "# Could possibly do something like this to skip queue instead of drawing", "key unless draw_now is True. \"\"\" def _wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args,", "def draw_create_post_hook(self): \"\"\" Called after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append", "methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows us to", "one order to dict for this func call. Creates a key with id", "for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent) or parent", "= sigInfo[\"self\"] orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) #", "queue instead of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) #", "call. Creates a key with id of Part and func's name. If key", "if _draw: for part in self.get_children(depth=-1, include_self=True, gen=True): part.draw_create() assert \"Contain\" in getBaseClassNames(parent)", "\"\"\" Used to decouple properties, called by draw_create which is called by init", "\"\"\" Called after widget is packed. \"\"\" def _deco_draw_queue(func): \"\"\" Append one order", "def draw_create_hook(self, kwargs): \"\"\" Used to decouple properties, called by draw_create which is", "packed. 
\"\"\" def _deco_draw_queue(func): \"\"\" Append one order to dict for this func", "None) # This allows us to manually call draw_create(draw_now=True) after instantiating a Page", "# if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo}) # else: # orders[key] = sigInfo", "Used to decouple properties, called by draw_create which is called by init and", "Append one order to dict for this func call. Creates a key with", "key with id of Part and func's name. If key exists as an", "orders = methodGrouper.orders key = methodGrouper.get_order_key(func) if sigInfo[\"draw_now\"]: orders.pop(key, None) # This allows", "If key exists as an old order then it's removed. Returns key unless", "skip queue instead of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders, **{key: sigInfo})", "in getBaseClassNames(parent) or parent is None class PartBaseClass: def draw_create_hook(self, kwargs): \"\"\" Used", "as an old order then it's removed. Returns key unless draw_now is True.", "after instantiating a Page instead of passing draw_now to Page. sigInfo.call() else: orders[key]", "this to skip queue instead of drawing instantly # if sigInfo[\"draw_now\"]: # dict_insert(orders,", "_wrapper(*args, **kwargs): sigInfo = SigInfo(func, *args, **kwargs) methodGrouper = sigInfo[\"self\"] orders = methodGrouper.orders" ]
[ ">>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f,", "JSON file...\" # Assumption is that the input topo is in file named", "data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt = 1", "Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import ExerciseRunner from p4_mininet import", "= \"Input Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory path", "delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem delay 300ms\") #", "in my_workers : \"Copying to Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if", "print \"Execute Command on Host ...\", host print \"Command Monitor on Host ...\",", "pcap_dir = str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if args.switch_json : switch_json =", "to check network connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd =", "+ 1 my_swlist=[] for key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list", "file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the given input", "import Link, TCIntf from mininet.net import Mininet # from MaxiNet.Frontend import maxinet from", "MaxiNet Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path)", "in hnames: if host != nxthost : print \"pinging from ..\", host ,\"", "\"/tmp/routernew.json\") else : print \"Create New switch JSON file...\" # Assumption is that", "sw in my_swlist : exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"] : print \"Execute", "Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help =", "import traceback import json import mininet.term from mininet.topo import Topo from mininet.node import", "mac=Tools.makeMAC(cnt)) cnt = cnt + 1 my_swlist=[] for key, value in dict.items(data[\"switches\"]): my_swlist.append(key)", "Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt", "topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir) print \"Switch Log Dir ...\", swlog_dir", ": pcap_dir = str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if args.switch_json : switch_json", "cnt = cnt + 1 #hnames = data[\"hosts\"] hnames = data[\"links\"] hlen =", "add dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print", "each of worker copy2(topo_fname,'in_topo.json') print \"File sucessfully copied as in_topo.json...\" with open('in_topo.json') as", "1 my_swlist=[] for key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list of", "for key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list of switches in", "file # Num workers argument is not saved in experiment.cfg file f =", "for sw in my_swlist : 
exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"] : print", "at Mininet CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization", "raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc", "# # This is a sample program to emulate P4 Switches in Distributed", "us in MaxiNet Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir =", "the input topo is in file named in_topo.json os.system('python gen_router_json.py') worker.put_file(\"routernew.json\", \"/tmp/routernew.json\") print", "= len(hnames) for host in hnames: for nxthost in hnames: if host !=", "-c 1 10.0.\" + tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print", "default=1, help = \"Number of Workers for the Experiment : (Default 1) \")", "len(hnames) for x in range(0,hlen) : tmp = str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp,", "import logging import os import signal import subprocess import sys import tempfile import", "str(args.topo_fname) print \"Input Topo File Name is ...\", topo_fname if args.swlog_dir : swlog_dir", "\"Creating Cluster ...\" # start cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment", "delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem delay 300ms\") #", "Topology...\" print \"Creating Cluster ...\" # start cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) #", "exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add", "delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") #", "JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help = \"Invoke at", ": cli_opt = str(args.cli_opt) print \"Mininet CLI Option ...\", cli_opt if args.swinit_opt :", ">>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f,", "maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment with P4Switch on cluster exp = maxinet.Experiment(cluster, myglobalTopo,", "\"P4 Switch Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch Parser", "= Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file", "myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\" print \"Creating Cluster ...\" # start cluster", "mininet.term from mininet.topo import Topo from mininet.node import OVSSwitch from mininet.node import UserSwitch,", "import Mininet # from MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from", "500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc", ": print \"Execute Command on Host ...\", host print \"Command Monitor on Host", "root netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc 
qdisc add dev mn_tun1 root netem delay", "MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from", ": switch_json = str(args.switch_json) print \"Switch Parser JSON File Name ...\", switch_json if", "my_swlist : exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"] : print \"Execute Command on", "\") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\", help = \"Directory path for Switch pcap files \")", "#hnames = data[\"hosts\"] hnames = data[\"links\"] hlen = len(hnames) for x in range(0,hlen)", "len(hnames) cnt = 1 for x in range(0,hlen) : tmp = str(hnames[x]) myglobalTopo.addHost(tmp,", "\"Finished Loading Topology...\" print \"Creating Cluster ...\" # start cluster cluster = maxinet.Cluster(minWorkers=1,", "to list of switches in topology cnt = 1 for value1, value2 in", "converge\" time.sleep(10) # Try to do a pingall hosts hnames = data[\"hosts\"] hlen", "Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\",", "This is going to be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line", "like this # import argparse import atexit import logging import os import signal", "to emulate P4 Switches in Distributed environment # using Maxinet. The skeleton application", "= str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\" print \"Creating", "done by us in MaxiNet Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..'))", "host ,\" -> \", nxthost, \" to check network connectivity ...\" nxt_hnum =", "sucessfully copied as in_topo.json...\" with open('in_topo.json') as data_file: data = json.load(data_file) hnames =", "the controller to converge\" time.sleep(10) # Try to do a pingall hosts hnames", "+ \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\")", "File Name is ...\", topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir) print \"Switch", "EXE Name ...\", switch_exe if args.cli_opt : cli_opt = str(args.cli_opt) print \"Mininet CLI", "be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line", ">>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print >>f,", "copy experiment.cfg, in_topo.json files to the respective workers my_allowed_paths = [] for item", "change dev s3-eth2 root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1", "= cnt + 1 my_swlist=[] for key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add", "num_workers = int(args.num_workers) print \"Number of Workers ...\", num_workers # Now save the", "Host ...\", host print \"Command Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\")", "200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc", "from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import ExerciseRunner from p4_mininet import P4Switch from", "parser.add_argument('--switch_json', dest=\"switch_json\", 
default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch", "CLI Option ...\", cli_opt if args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch Init", "PYTHONPATH # This is done to pickup changes done by us in MaxiNet", "not saved in experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This is", "worker copy2(topo_fname,'in_topo.json') print \"File sucessfully copied as in_topo.json...\" with open('in_topo.json') as data_file: data", "cnt + 1 my_swlist=[] for key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to", "for nxthost in hnames: if host != nxthost : print \"pinging from ..\",", "# exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc", ": print \"pinging from ..\", host ,\" -> \", nxthost, \" to check", "root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem delay", "...\" print exp.switch raw_input(\"[Continue...]\") for host in hnames: for nxthost in hnames: if", "do a pingall hosts hnames = data[\"hosts\"] hlen = len(hnames) for host in", "arguments in experiment.cfg file # Num workers argument is not saved in experiment.cfg", "\"/tmp/routernew.json\") print \"***** Experiment Setup Start *****\" exp.setup() print \"waiting 10 seconds for", "dev s3-eth2 root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1 root", "Directory in PYTHONPATH # This is done to pickup changes done by us", "Num workers argument is not saved in experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\")", "Name ...\", switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch EXE Name", "Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help = \"Invoke at Mininet CLI in the Workers\")", "maxWorkers=num_workers) # start experiment with P4Switch on cluster exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch)", "= str(nxt_hnum) rcmd = \"ping -c 1 10.0.\" + tmp_hname + \".10\" print", "print exp.switch raw_input(\"[Continue...]\") for host in hnames: for nxthost in hnames: if host", "Need to Create switch JSON file...\" worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else : print \"Create New", "tmp1) print \"Finished Loading Topology...\" print \"Creating Cluster ...\" # start cluster cluster", "str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames = data[\"hosts\"] hnames =", "# create topology myglobalTopo = Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help", "respective workers my_allowed_paths = [] for item in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len", "json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt = 1 for x in", "dev s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root", "time import Pyro4 import threading import traceback import json import mininet.term from mininet.topo", "600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print exp.switch raw_input(\"[Continue...]\") for host in hnames:", "print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) 
exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\") # wait", "hlen = len(hnames) for x in range(0,hlen) : tmp = str(hnames[x][0]) tmp1 =", "switch JSON file...\" worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else : print \"Create New switch JSON file...\"", "parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet # create topology myglobalTopo =", "parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1,", "1 #hnames = data[\"hosts\"] hnames = data[\"links\"] hlen = len(hnames) for x in", "exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc add", "delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\")", "with open('in_topo.json') as data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames)", "New switch JSON file...\" # Assumption is that the input topo is in", "print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print >>f, out_line f.close() #", "import Pyro4 import threading import traceback import json import mininet.term from mininet.topo import", "dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print exp.switch", "Mininet CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization AtStart", "= str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames = data[\"hosts\"] hnames", "\") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay 200ms\") #", "netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root netem delay 200ms\")", "parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file for Experiment\")", "\") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\", help = \"P4 Switch Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\",", "= int(args.num_workers) print \"Number of Workers ...\", num_workers # Now save the Input", "print \"Create New switch JSON file...\" # Assumption is that the input topo", "= \"Number of Workers for the Experiment : (Default 1) \") args =", "# Try to do a pingall hosts hnames = data[\"hosts\"] hlen = len(hnames)", "# Now also copy the given input topo file as in_top.json in each", "..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\") : break print \"Program Switch", "in topology cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key)", "Mininet # from MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager", "..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\") # wait for user to", "s2-eth2 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root netem", "skeleton application program should be like this # import argparse import atexit import", "args.switch_exe : 
switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe if args.cli_opt", "Workers for the Experiment : (Default 1) \") args = parser.parse_args() if args.topo_fname", "exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch) # We can copy experiment.cfg, in_topo.json files to", "check network connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd = \"ping", "default=\"False\", help = \"Invoke at Mininet CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\",", "out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print >>f, out_line f.close() # Rename the file", "of switches in topology cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp", "data[\"hosts\"] hlen = len(hnames) cnt = 1 for x in range(0,hlen) : tmp", "= maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment with P4Switch on cluster exp = maxinet.Experiment(cluster,", "1 10.0.\" + tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd)", "in_top.json in each of worker copy2(topo_fname,'in_topo.json') print \"File sucessfully copied as in_topo.json...\" with", "int(args.num_workers) print \"Number of Workers ...\", num_workers # Now save the Input CLI", "Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from", "key, value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list of switches in topology", "exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change", "exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\") # wait for user to acknowledge network connectivity", "from mininet.net import Mininet # from MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools,", "import * import pdb # Include Project Directory in PYTHONPATH # This is", "to do a pingall hosts hnames = data[\"hosts\"] hlen = len(hnames) for host", "worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No Need to Create", "out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt)", "objects as per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for", "s3-eth2 root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1 root netem", "= \"Switch Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of", "f.close() # Rename the file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also", ": num_workers = int(args.num_workers) print \"Number of Workers ...\", num_workers # Now save", "= data[\"hosts\"] hlen = len(hnames) cnt = 1 for x in range(0,hlen) :", "from mininet.node import UserSwitch, OVSSwitch from mininet.link import Link, TCIntf from mininet.net import", "delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root netem delay 200ms\") #", 
"exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") #", "dev s2-eth2 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root", "as in_topo.json...\" with open('in_topo.json') as data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen", "Try to do a pingall hosts hnames = data[\"hosts\"] hlen = len(hnames) for", "print \"Number of Workers ...\", num_workers # Now save the Input CLI arguments", "exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class", ">>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f,", "= 1 for x in range(0,hlen) : tmp = str(hnames[x]) myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt))", "list of switches in topology cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]):", "print \"Input Topo File Name is ...\", topo_fname if args.swlog_dir : swlog_dir =", "(Default 1) \") args = parser.parse_args() if args.topo_fname : topo_fname = str(args.topo_fname) print", "# from MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import", "workers my_allowed_paths = [] for item in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len =", "> /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem", "import sys import tempfile import time import Pyro4 import threading import traceback import", "t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the given input topo", "Setup Start *****\" exp.setup() print \"waiting 10 seconds for routing algorithms on the", "= \"P4 Switch Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch", "= os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import", "cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment with P4Switch on cluster exp", "Start *****\" exp.setup() print \"waiting 10 seconds for routing algorithms on the controller", "shutil import * import pdb # Include Project Directory in PYTHONPATH # This", "f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This is going to be hardcoded print", "print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\") : break print \"Program Switch objects as", "os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet # create topology", ": swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt if args.num_workers :", "10 seconds for routing algorithms on the controller to converge\" time.sleep(10) # Try", "application program should be like this # import argparse import atexit import logging", "Rename the file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the", "in the 
Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization AtStart | ByApp\")", "str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt if args.num_workers : num_workers = int(args.num_workers)", "= \"Directory path for Switch pcap files \") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\", help =", "\"Switch Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers", "create topology myglobalTopo = Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help =", "dest=\"pcap_dir\", default=\"/tmp\", help = \"Directory path for Switch pcap files \") parser.add_argument('--switch_json', dest=\"switch_json\",", "import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import ExerciseRunner from p4_mininet", "...\", switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\",", "= argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file for Experiment\") parser.add_argument('--swlog_dir',", "for Switch pcap files \") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\", help = \"P4 Switch Parser", "TCIntf from mininet.net import Mininet # from MaxiNet.Frontend import maxinet from MaxiNet.tools import", "Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory path for Switch", "for item in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers()", "print \"Switch Parser JSON File Name ...\", switch_json if args.switch_exe : switch_exe =", "hlen = len(hnames) for host in hnames: for nxthost in hnames: if host", "qdisc add dev mn_tun1 root netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add dev", "in PYTHONPATH # This is done to pickup changes done by us in", "1) \") args = parser.parse_args() if args.topo_fname : topo_fname = str(args.topo_fname) print \"Input", "in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames", ": switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe if args.cli_opt :", "for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory path for Switch Log files", "): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for worker in my_workers :", "...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd = \"ping -c 1 10.0.\"", "dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list of switches in topology cnt = 1", "OVSSwitch from mininet.link import Link, TCIntf from mininet.net import Mininet # from MaxiNet.Frontend", "hosts hnames = data[\"hosts\"] hlen = len(hnames) for host in hnames: for nxthost", "if (allowed_paths_len <= 0): print \"No Need to Create switch JSON file...\" worker.put_file(\"simple_router.json\",", "Add to list of switches in topology cnt = 1 for value1, value2", "200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc", "print >>f, out_line f.close() # Rename 
the file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\")", "# Add to list of switches in topology cnt = 1 for value1,", "print \"Program Switch objects as per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist", "dev s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root", "default=\"/tmp\", help = \"Directory path for Switch pcap files \") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\",", "+ tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\")", "swlog_dir if args.pcap_dir : pcap_dir = str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if", "files to the respective workers my_allowed_paths = [] for item in dict.items( data[\"allowed_paths\"]", "add dev mn_tun1 root netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1", "import tempfile import time import Pyro4 import threading import traceback import json import", "= str(args.cli_opt) print \"Mininet CLI Option ...\", cli_opt if args.swinit_opt : swinit_opt =", "/tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay", "Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file for", "hnames = data[\"hosts\"] hlen = len(hnames) cnt = 1 for x in range(0,hlen)", "connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd = \"ping -c 1", "in Distributed environment # using Maxinet. The skeleton application program should be like", "switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe", "...\", switch_exe if args.cli_opt : cli_opt = str(args.cli_opt) print \"Mininet CLI Option ...\",", "by us in MaxiNet Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir", "the Experiment : (Default 1) \") args = parser.parse_args() if args.topo_fname : topo_fname", "= str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe if args.cli_opt : cli_opt =", "Name ...\", switch_exe if args.cli_opt : cli_opt = str(args.cli_opt) print \"Mininet CLI Option", "parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet #", "help = \"Input Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory", "CLI arguments in experiment.cfg file # Num workers argument is not saved in", "...\" # start cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment with P4Switch", "per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for host, my_cmd", "s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root netem", "root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root netem delay", "import pdb # Include Project Directory in PYTHONPATH # This is done to", "myglobalTopo, switch=P4Switch) # We can copy experiment.cfg, in_topo.json files to the respective workers", "for x in range(0,hlen) : tmp = str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1)", "Frontend import maxinet # create topology myglobalTopo 
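# With the defaults above, the generated experiment.cfg would contain key=value
# lines along these lines (each value tracks whatever flag was actually passed):
#
#   topo_file_name=/tmp/in_topo.json
#   swlog_dir=/tmp
#   pcap_dir=/tmp
#   p4_switch_json=/tmp/routernew.json
#   bmv2_exe=/home/rbabu/behavioral-model/targets/simple_router/simple_router
#   Invoke_mininet_cli=False
#   p4_switch_initialization=AtStart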
# Now also copy the given input topo file as in_topo.json into each worker.
copy2(topo_fname, 'in_topo.json')
print "File successfully copied as in_topo.json..."

with open('in_topo.json') as data_file:
    data = json.load(data_file)
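# The topology JSON loaded above is read below for the keys "hosts", "switches",
# "links", "allowed_paths" and "host_cmnds". A minimal input satisfying those
# accesses might look like this (a sketch inferred from the parsing code, not a
# file shipped with the repo):
#
#   {
#       "hosts": ["h1", "h2"],
#       "switches": {"s1": {"init": "AtStart"}},
#       "links": [["h1", "s1"], ["s1", "h2"]],
#       "allowed_paths": {},
#       "host_cmnds": [["h1", "ls /tmp"]]
#   }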
Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4", "# Num workers argument is not saved in experiment.cfg file f = open(\"t1_experiment.cfg\",", "= str(args.switch_json) print \"Switch Parser JSON File Name ...\", switch_json if args.switch_exe :", "dest=\"switch_json\", default=\"/tmp/routernew.json\", help = \"P4 Switch Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help", "import UserSwitch, OVSSwitch from mininet.link import Link, TCIntf from mininet.net import Mininet #", "worker.put_file(\"routernew.json\", \"/tmp/routernew.json\") print \"***** Experiment Setup Start *****\" exp.setup() print \"waiting 10 seconds", ": break print \"Program Switch objects as per topology ...\" raw_input(\"[Continue...]\") for sw", "copied as in_topo.json...\" with open('in_topo.json') as data_file: data = json.load(data_file) hnames = data[\"hosts\"]", "my_workers : \"Copying to Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len", "in MaxiNet Frontend curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path))", "is not saved in experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This", "CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch Initialization AtStart |", "# Include Project Directory in PYTHONPATH # This is done to pickup changes", "on cluster exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch) # We can copy experiment.cfg, in_topo.json", "Switch Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\", help = \"Directory path for Switch", "netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem delay 300ms\")", "...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out &", "args.pcap_dir : pcap_dir = str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if args.switch_json :", "File Name ...\", switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch EXE", "if args.topo_fname : topo_fname = str(args.topo_fname) print \"Input Topo File Name is ...\",", "Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\", help = \"Directory path for Switch pcap", "topo file as in_top.json in each of worker copy2(topo_fname,'in_topo.json') print \"File sucessfully copied", "to Create switch JSON file...\" worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else : print \"Create New switch", "Experiment Setup Start *****\" exp.setup() print \"waiting 10 seconds for routing algorithms on", "controller to converge\" time.sleep(10) # Try to do a pingall hosts hnames =", "for routing algorithms on the controller to converge\" time.sleep(10) # Try to do", "out_line=\"topo_file_name=/tmp/in_topo.json\" # This is going to be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print", "dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch Parser JSON\") 
parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\")", "in range(0,hlen) : tmp = str(hnames[x]) myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt)) cnt = cnt +", "to Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print", "add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0", "= open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This is going to be hardcoded print >>f,", "hlen = len(hnames) cnt = 1 for x in range(0,hlen) : tmp =", "value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1", "#!/usr/bin/python2 # # This is a sample program to emulate P4 Switches in", "range(0,hlen) : tmp = str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading", "default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli',", "if args.num_workers : num_workers = int(args.num_workers) print \"Number of Workers ...\", num_workers #", "the file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the given", "print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change", "JSON File Name ...\", switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch", "pickup changes done by us in MaxiNet Frontend curr_path = os.getcwd() parent_path =", "args.num_workers : num_workers = int(args.num_workers) print \"Number of Workers ...\", num_workers # Now", "= os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet # create topology myglobalTopo = Topo()", "worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else : print \"Create New switch JSON file...\" # Assumption is", "\"waiting 10 seconds for routing algorithms on the controller to converge\" time.sleep(10) #", "print \"No Need to Create switch JSON file...\" worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else : print", "seconds for routing algorithms on the controller to converge\" time.sleep(10) # Try to", "The skeleton application program should be like this # import argparse import atexit", "cnt + 1 #hnames = data[\"hosts\"] hnames = data[\"links\"] hlen = len(hnames) for", "Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers for", "help = \"Switch Initialization AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number", "argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\",", "worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No Need to Create switch JSON", "Class ...\" print exp.switch 
raw_input(\"[Continue...]\") for host in hnames: for nxthost in hnames:", "for Switch Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\", help = \"Directory path for", ": (Default 1) \") args = parser.parse_args() if args.topo_fname : topo_fname = str(args.topo_fname)", "curr_path = os.getcwd() parent_path = os.path.abspath(os.path.join(os.getcwd(), '..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend", "--cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root", "600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print", "\"Input Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory path for", "10.0.\" + tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals())", "import argparse import atexit import logging import os import signal import subprocess import", "files \") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\", help = \"P4 Switch Parser JSON\") # parser.add_argument('--switch_json',", "Dir ...\", swlog_dir if args.pcap_dir : pcap_dir = str(args.pcap_dir) print \"Pcap Dir ...\",", "'..')) parent_dir = os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet # create topology myglobalTopo", "value in dict.items(data[\"switches\"]): my_swlist.append(key) # Add to list of switches in topology cnt", "1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt =", "default=\"/tmp\", help = \"Directory path for Switch Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\",", "cli_opt if args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt", "= \"P4 Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\",", "experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This is going to be", "from shutil import * import pdb # Include Project Directory in PYTHONPATH #", "also copy the given input topo file as in_top.json in each of worker", "input topo file as in_top.json in each of worker copy2(topo_fname,'in_topo.json') print \"File sucessfully", "Assumption is that the input topo is in file named in_topo.json os.system('python gen_router_json.py')", "We can copy experiment.cfg, in_topo.json files to the respective workers my_allowed_paths = []", "= str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if args.switch_json : switch_json = str(args.switch_json)", "switches in topology cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp =", "using Maxinet. 
The skeleton application program should be like this # import argparse", "= str(args.swlog_dir) print \"Switch Log Dir ...\", swlog_dir if args.pcap_dir : pcap_dir =", "in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for worker", "args.cli_opt : cli_opt = str(args.cli_opt) print \"Mininet CLI Option ...\", cli_opt if args.swinit_opt", "from mininet.node import OVSSwitch from mininet.node import UserSwitch, OVSSwitch from mininet.link import Link,", "from mininet.link import Link, TCIntf from mininet.net import Mininet # from MaxiNet.Frontend import", "maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import", "topology cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp,", "...\", cli_opt if args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\",", "to pickup changes done by us in MaxiNet Frontend curr_path = os.getcwd() parent_path", "\"Command Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py", "P4Switch from shutil import * import pdb # Include Project Directory in PYTHONPATH", "str(args.switch_json) print \"Switch Parser JSON File Name ...\", switch_json if args.switch_exe : switch_exe", "ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers for the Experiment :", "1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No Need", "p4_mininet import P4Switch from shutil import * import pdb # Include Project Directory", "...\", topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir) print \"Switch Log Dir ...\",", "netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\")", "netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print exp.switch raw_input(\"[Continue...]\") for host", "atexit import logging import os import signal import subprocess import sys import tempfile", "from mininet.topo import Topo from mininet.node import OVSSwitch from mininet.node import UserSwitch, OVSSwitch", "# We can copy experiment.cfg, in_topo.json files to the respective workers my_allowed_paths =", "<filename>MaxiNet/WorkerServer/tst_driver.py<gh_stars>1-10 #!/usr/bin/python2 # # This is a sample program to emulate P4 Switches", "help = \"Directory path for Switch pcap files \") parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/tmp/routernew.json\", help", "ExerciseRunner from p4_mininet import P4Switch from shutil import * import pdb # Include", "data[\"host_cmnds\"] : print \"Execute Command on Host ...\", host print \"Command Monitor on", "import time import Pyro4 import threading import traceback import json import mininet.term from", "topo_fname = str(args.topo_fname) print \"Input Topo File Name is ...\", topo_fname if args.swlog_dir", "dest=\"swlog_dir\", default=\"/tmp\", help = \"Directory path for Switch Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\",", "...\", pcap_dir if args.switch_json : switch_json = str(args.switch_json) print \"Switch Parser JSON File", "swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt if 
args.num_workers : num_workers", "as per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for host,", "be like this # import argparse import atexit import logging import os import", "= cluster.workers() for worker in my_workers : \"Copying to Worker 1...\", worker worker.put_file(\"experiment.cfg\",", "tmp_hname = str(nxt_hnum) rcmd = \"ping -c 1 10.0.\" + tmp_hname + \".10\"", "host, my_cmd in data[\"host_cmnds\"] : print \"Execute Command on Host ...\", host print", "\"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\") # wait for", "nxthost, \" to check network connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum)", "tmp = str(hnames[x]) myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt)) cnt = cnt + 1 my_swlist=[] for", ": topo_fname = str(args.topo_fname) print \"Input Topo File Name is ...\", topo_fname if", "hnames: if host != nxthost : print \"pinging from ..\", host ,\" ->", "my_swlist.append(key) # Add to list of switches in topology cnt = 1 for", "if args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt if", "Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help = \"P4 Switch Parser JSON\") parser.add_argument('--switch_exe',", "= \"Directory path for Switch Log files \") parser.add_argument('--pcap_dir', dest=\"pcap_dir\", default=\"/tmp\", help =", "\"Mininet CLI Option ...\", cli_opt if args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch", "experiment.cfg file # Num workers argument is not saved in experiment.cfg file f", "root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1 root netem delay", "import Topo from mininet.node import OVSSwitch from mininet.node import UserSwitch, OVSSwitch from mininet.link", "| ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers for the Experiment", "maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch) # We can copy experiment.cfg, in_topo.json files to the respective", "import json import mininet.term from mininet.topo import Topo from mininet.node import OVSSwitch from", "Topo File Name is ...\", topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir) print", "myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames = data[\"hosts\"] hnames = data[\"links\"]", "# exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem delay 300ms\") # exp.get_node(\"s6\").cmd(\"tc qdisc", "import P4Switch from shutil import * import pdb # Include Project Directory in", "args.swinit_opt : swinit_opt = str(args.swinit_opt) print \"Switch Init Option ...\", swinit_opt if args.num_workers", "\" to check network connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd", "tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\" print \"Creating Cluster ...\"", "\"ByApp\") : break print \"Program Switch objects as per topology ...\" raw_input(\"[Continue...]\") for", "to be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f,", "# Assumption is that the input topo is in file named in_topo.json os.system('python", "start cluster cluster = maxinet.Cluster(minWorkers=1, 
maxWorkers=num_workers) # start experiment with P4Switch on cluster", "my_allowed_paths = [] for item in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths)", "\"Number of Workers for the Experiment : (Default 1) \") args = parser.parse_args()", "import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise", "from MaxiNet.Frontend import maxinet from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager", "Option ...\", swinit_opt if args.num_workers : num_workers = int(args.num_workers) print \"Number of Workers", "in_topo.json...\" with open('in_topo.json') as data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen =", "exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev", "swlog_dir = str(args.swlog_dir) print \"Switch Log Dir ...\", swlog_dir if args.pcap_dir : pcap_dir", "\"Input Topo File Name is ...\", topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir)", "Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help = \"Invoke at Mininet CLI in the", "sample program to emulate P4 Switches in Distributed environment # using Maxinet. The", "print \"Creating Cluster ...\" # start cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start", "qdisc change dev s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev", "cnt = 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt))", "= 1 for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt", "is that the input topo is in file named in_topo.json os.system('python gen_router_json.py') worker.put_file(\"routernew.json\",", "\"Program Switch objects as per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist :", "Switch objects as per topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw)", "\"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No Need to Create switch", "threading import traceback import json import mininet.term from mininet.topo import Topo from mininet.node", "parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers for the Experiment : (Default", "= str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\" print \"Creating Cluster ...\" #", "if args.switch_json : switch_json = str(args.switch_json) print \"Switch Parser JSON File Name ...\",", "in_topo.json files to the respective workers my_allowed_paths = [] for item in dict.items(", "-> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the given input topo file", "is going to be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir)", "Project Directory in PYTHONPATH # This is done to pickup changes done by", "with P4Switch on cluster exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch) # We can copy", "import threading import traceback import json import mininet.term from mininet.topo import Topo from", "logging 
args = parser.parse_args()
if args.topo_fname:
    topo_fname = str(args.topo_fname)
    print "Input Topo File Name is ...", topo_fname
if args.swlog_dir:
    swlog_dir = str(args.swlog_dir)
    print "Switch Log Dir ...", swlog_dir
if args.pcap_dir:
    pcap_dir = str(args.pcap_dir)
    print "Pcap Dir ...", pcap_dir
if args.switch_json:
    switch_json = str(args.switch_json)
    print "Switch Parser JSON File Name ...", switch_json
if args.switch_exe:
    switch_exe = str(args.switch_exe)
    print "Switch EXE Name ...", switch_exe
if args.cli_opt:
    cli_opt = str(args.cli_opt)
    print "Mininet CLI Option ...", cli_opt
if args.swinit_opt:
    swinit_opt = str(args.swinit_opt)
    print "Switch Init Option ...", swinit_opt
if args.num_workers:
    num_workers = int(args.num_workers)
    print "Number of Workers ...", num_workers

# Now save the input CLI arguments in the experiment.cfg file.
# The num_workers argument is not saved in the experiment.cfg file.
f = open("t1_experiment.cfg", "w")
out_line = "topo_file_name=/tmp/in_topo.json"   # This is going to be hardcoded
print >>f, out_line
out_line = "swlog_dir=" + str(swlog_dir)
print >>f, out_line
out_line = "pcap_dir=" + str(pcap_dir)
print >>f, out_line
out_line = "p4_switch_json=" + str(switch_json)
print >>f, out_line
out_line = "bmv2_exe=" + str(switch_exe)
print >>f, out_line
out_line = "Invoke_mininet_cli=" + str(cli_opt)
print >>f, out_line
out_line = "p4_switch_initialization=" + str(swinit_opt)
print >>f, out_line
f.close()

# Rename the file t1_experiment.cfg -> experiment.cfg.
os.rename("t1_experiment.cfg", "experiment.cfg")
\"Copying to Worker 1...\", worker", "= str(args.topo_fname) print \"Input Topo File Name is ...\", topo_fname if args.swlog_dir :", ": \"Copying to Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <=", "args = parser.parse_args() if args.topo_fname : topo_fname = str(args.topo_fname) print \"Input Topo File", "host in hnames: for nxthost in hnames: if host != nxthost : print", "!= nxthost : print \"pinging from ..\", host ,\" -> \", nxthost, \"", "open('in_topo.json') as data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt", "# exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth2 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc", "netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem delay 300ms\")", "\", nxthost, \" to check network connectivity ...\" nxt_hnum = int(nxthost[1:]) tmp_hname =", "default=\"/tmp/routernew.json\", help = \"P4 Switch Parser JSON\") # parser.add_argument('--switch_json', dest=\"switch_json\", default=\"/home/rbabu/MaxiNet/MaxiNet/WorkerServer/simple_router.json\", help =", "exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change", "\"Switch Log Dir ...\", swlog_dir if args.pcap_dir : pcap_dir = str(args.pcap_dir) print \"Pcap", ": tmp = str(hnames[x]) myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt)) cnt = cnt + 1 my_swlist=[]", "if (swinit_opt == \"ByApp\") : break print \"Program Switch objects as per topology", "dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames =", "root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print exp.switch raw_input(\"[Continue...]\") for", "parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input Topology file for Experiment\") parser.add_argument('--swlog_dir', dest=\"swlog_dir\", default=\"/tmp\",", "int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd = \"ping -c 1 10.0.\" + tmp_hname +", "if host != nxthost : print \"pinging from ..\", host ,\" -> \",", "to converge\" time.sleep(10) # Try to do a pingall hosts hnames = data[\"hosts\"]", "print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print", "tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt + 1 #hnames = data[\"hosts\"]", "s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth2 root netem", "workers argument is not saved in experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\"", "dest=\"num_workers\", default=1, help = \"Number of Workers for the Experiment : (Default 1)", "should be like this # import argparse import atexit import logging import os", "out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print >>f, out_line", "hnames: for nxthost in hnames: if host != nxthost : print \"pinging from", "<= 0): print \"No Need to Create switch JSON file...\" worker.put_file(\"simple_router.json\", \"/tmp/routernew.json\") else", "= [] for item in dict.items( 
data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers", ": exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"] : print \"Execute Command on Host", "dest=\"cli_opt\", default=\"False\", help = \"Invoke at Mininet CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\",", "for value1, value2 in dict.items(data[\"switches\"][key]): tmp = str(key) myglobalTopo.addSwitch(tmp, dpid=Tools.makeDPID(cnt)) cnt = cnt", "500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc", "item in dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for", "num_workers # Now save the Input CLI arguments in experiment.cfg file # Num", "allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for worker in my_workers : \"Copying to", "10.0.\" + tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if", "= json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt = 1 for x", "exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc add", "os.path.basename(os.path.abspath(parent_path)) sys.path.insert(1,parent_path) from Frontend import maxinet # create topology myglobalTopo = Topo() parser", "\") args = parser.parse_args() if args.topo_fname : topo_fname = str(args.topo_fname) print \"Input Topo", "out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print >>f, out_line f.close() # Rename the", "\"Switch EXE Name ...\", switch_exe if args.cli_opt : cli_opt = str(args.cli_opt) print \"Mininet", "print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\") : break", "# exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc", "UserSwitch, OVSSwitch from mininet.link import Link, TCIntf from mininet.net import Mininet # from", "exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change", "Cluster ...\" # start cluster cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers) # start experiment with", "# exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc", "(swinit_opt == \"ByApp\") : break print \"Program Switch objects as per topology ...\"", "raw_input(\"[Continue...]\") for host in hnames: for nxthost in hnames: if host != nxthost", "\".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\") #", ">>f, out_line f.close() # Rename the file t1_experiment.cfg -> experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") #", "tmp = str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\" print", "MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import ExerciseRunner from p4_mininet import P4Switch from shutil", "= data[\"hosts\"] hnames = data[\"links\"] hlen = len(hnames) for x in range(0,hlen) :", "print \"***** Experiment Setup Start *****\" exp.setup() print \"waiting 10 seconds for routing", "hnames = data[\"hosts\"] hlen = len(hnames) for host in hnames: for nxthost in", 
"raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc", ": swlog_dir = str(args.swlog_dir) print \"Switch Log Dir ...\", swlog_dir if args.pcap_dir :", "\"Pcap Dir ...\", pcap_dir if args.switch_json : switch_json = str(args.switch_json) print \"Switch Parser", "# This is going to be hardcoded print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f,", "Workers ...\", num_workers # Now save the Input CLI arguments in experiment.cfg file", "as data_file: data = json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt =", "*****\" exp.setup() print \"waiting 10 seconds for routing algorithms on the controller to", "import OVSSwitch from mininet.node import UserSwitch, OVSSwitch from mininet.link import Link, TCIntf from", "from run_exercise import ExerciseRunner from p4_mininet import P4Switch from shutil import * import", "...\", num_workers # Now save the Input CLI arguments in experiment.cfg file #", "sys.path.insert(1,parent_path) from Frontend import maxinet # create topology myglobalTopo = Topo() parser =", "that the input topo is in file named in_topo.json os.system('python gen_router_json.py') worker.put_file(\"routernew.json\", \"/tmp/routernew.json\")", "start experiment with P4Switch on cluster exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch) # We", "is in file named in_topo.json os.system('python gen_router_json.py') worker.put_file(\"routernew.json\", \"/tmp/routernew.json\") print \"***** Experiment Setup", "print \"Command Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python", "qdisc change dev s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev", "change dev s2-eth2 root netem delay 200ms\") # exp.get_node(\"s2\").cmd(\"tc qdisc add dev mn_tun0", "= len(hnames) cnt = 1 for x in range(0,hlen) : tmp = str(hnames[x])", "= cnt + 1 #hnames = data[\"hosts\"] hnames = data[\"links\"] hlen = len(hnames)", "import SSH_Manager from run_exercise import ExerciseRunner from p4_mininet import P4Switch from shutil import", "out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line", "experiment.cfg os.rename(\"t1_experiment.cfg\", \"experiment.cfg\") # Now also copy the given input topo file as", "parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help = \"Invoke at Mininet", "saved in experiment.cfg file f = open(\"t1_experiment.cfg\", \"w\") out_line=\"topo_file_name=/tmp/in_topo.json\" # This is going", "myglobalTopo.addHost(tmp, ip=Tools.makeIP(cnt), mac=Tools.makeMAC(cnt)) cnt = cnt + 1 my_swlist=[] for key, value in", "else : print \"Create New switch JSON file...\" # Assumption is that the", "switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe if args.cli_opt : cli_opt", "tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop()", ": tmp = str(hnames[x][0]) tmp1 = str(hnames[x][1]) myglobalTopo.addLink(tmp, tmp1) print \"Finished Loading Topology...\"", "rcmd print exp.get_node(host).cmd(rcmd) 
print "Creating Cluster ..."
# Start the cluster.
cluster = maxinet.Cluster(minWorkers=1, maxWorkers=num_workers)

# Start the experiment with P4Switch on the cluster.
exp = maxinet.Experiment(cluster, myglobalTopo, switch=P4Switch)

# Copy the experiment.cfg and in_topo.json files to the respective workers.
my_allowed_paths = []
for item in dict.items(data["allowed_paths"]):
    my_allowed_paths.append(item)
allowed_paths_len = len(my_allowed_paths)

my_workers = cluster.workers()
for worker in my_workers:
    print "Copying to Worker ...", worker
    worker.put_file("experiment.cfg", "/tmp/experiment.cfg")
    worker.put_file("in_topo.json", "/tmp/in_topo.json")
    if (allowed_paths_len <= 0):
        print "No Need to Create switch JSON file..."
        worker.put_file("simple_router.json", "/tmp/routernew.json")
    else:
        print "Create New switch JSON file..."
        # Assumption is that the input topo is in a file named in_topo.json.
        os.system('python gen_router_json.py')
        worker.put_file("routernew.json", "/tmp/routernew.json")

print "***** Experiment Setup Start *****"
exp.setup()

print "waiting 10 seconds for routing algorithms on the controller to converge"
time.sleep(10)

# Try a pingall across the hosts to check connectivity.
hnames = data["hosts"]
hlen = len(hnames)
for host in hnames:
    for nxthost in hnames:
        if host != nxthost:
            print "pinging from ..", host, " -> ", nxthost, " to check network connectivity ..."
            nxt_hnum = int(nxthost[1:])
            tmp_hname = str(nxt_hnum)
            rcmd = "ping -c 1 10.0." + tmp_hname + ".10"
            print "Rcmd is ..", rcmd
            print exp.get_node(host).cmd(rcmd)
            if (swinit_opt == "ByApp"):
                break
dest=\"topo_fname\", default=\"/tmp/in_topo.json\", help = \"Input", "# exp.get_node(\"s5\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch", "is ...\", topo_fname if args.swlog_dir : swlog_dir = str(args.swlog_dir) print \"Switch Log Dir", "import mininet.term from mininet.topo import Topo from mininet.node import OVSSwitch from mininet.node import", "Include Project Directory in PYTHONPATH # This is done to pickup changes done", "\"Create New switch JSON file...\" # Assumption is that the input topo is", "\"Invoke at Mininet CLI in the Workers\") parser.add_argument('--switch_init', dest=\"swinit_opt\", default=\"AtStart\", help = \"Switch", "gen_router_json.py') worker.put_file(\"routernew.json\", \"/tmp/routernew.json\") print \"***** Experiment Setup Start *****\" exp.setup() print \"waiting 10", "\"P4 Switch Parser JSON\") parser.add_argument('--switch_exe', dest=\"switch_exe\",default=\"/home/rbabu/behavioral-model/targets/simple_router/simple_router\", help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help", "Maxinet. The skeleton application program should be like this # import argparse import", "AtStart | ByApp\") parser.add_argument('--num_workers', dest=\"num_workers\", default=1, help = \"Number of Workers for the", "= int(nxthost[1:]) tmp_hname = str(nxt_hnum) rcmd = \"ping -c 1 10.0.\" + tmp_hname", "as in_top.json in each of worker copy2(topo_fname,'in_topo.json') print \"File sucessfully copied as in_topo.json...\"", "mn_tun1 root netem delay 600ms\") raw_input(\"[Continue...]\") print \"Switch Class ...\" print exp.switch raw_input(\"[Continue...]\")", "import maxinet # create topology myglobalTopo = Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\",", "help=\"P4 Switch Executable\") parser.add_argument('--mininet_cli', dest=\"cli_opt\", default=\"False\", help = \"Invoke at Mininet CLI in", "\".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\") :", "Command on Host ...\", host print \"Command Monitor on Host ...\", my_cmd print", "mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem", "= len(my_allowed_paths) my_workers = cluster.workers() for worker in my_workers : \"Copying to Worker", "# exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root netem delay 300ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc", "host print \"Command Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print", "Distributed environment # using Maxinet. The skeleton application program should be like this", "\"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt == \"ByApp\") : break print", "print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\")", "from p4_mininet import P4Switch from shutil import * import pdb # Include Project", "P4 Switches in Distributed environment # using Maxinet. The skeleton application program should", "Switches in Distributed environment # using Maxinet. 
The skeleton application program should be", "root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc add dev mn_tun0 root netem delay", "print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt) print >>f, out_line out_line=\"p4_switch_initialization=\"+str(swinit_opt) print", "from MaxiNet.tools import Tools, MaxiNetConfig from MaxiNet.WorkerServer.ssh_manager import SSH_Manager from run_exercise import ExerciseRunner", "str(args.cli_opt) print \"Mininet CLI Option ...\", cli_opt if args.swinit_opt : swinit_opt = str(args.swinit_opt)", "on Host ...\", host print \"Command Monitor on Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd)", "# This is done to pickup changes done by us in MaxiNet Frontend", "# print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out & \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc", "switch_json = str(args.switch_json) print \"Switch Parser JSON File Name ...\", switch_json if args.switch_exe", "import os import signal import subprocess import sys import tempfile import time import", "import signal import subprocess import sys import tempfile import time import Pyro4 import", "from Frontend import maxinet # create topology myglobalTopo = Topo() parser = argparse.ArgumentParser()", "args.swlog_dir : swlog_dir = str(args.swlog_dir) print \"Switch Log Dir ...\", swlog_dir if args.pcap_dir", "experiment.cfg, in_topo.json files to the respective workers my_allowed_paths = [] for item in", "out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe)", "can copy experiment.cfg, in_topo.json files to the respective workers my_allowed_paths = [] for", "maxinet # create topology myglobalTopo = Topo() parser = argparse.ArgumentParser() parser.add_argument('--topo', dest=\"topo_fname\", default=\"/tmp/in_topo.json\",", "..\", host ,\" -> \", nxthost, \" to check network connectivity ...\" nxt_hnum", "program to emulate P4 Switches in Distributed environment # using Maxinet. 
The skeleton", "data = json.load(data_file) hnames = data[\"hosts\"] hlen = len(hnames) cnt = 1 for", "& \") raw_input(\"[Continue...]\") # exp.get_node(\"s2\").cmd(\"tc qdisc change dev s2-eth1 root netem delay 200ms\")", "out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print >>f, out_line out_line=\"bmv2_exe=\"+str(switch_exe) print >>f, out_line out_line=\"Invoke_mininet_cli=\"+str(cli_opt)", "raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"] :", "file...\" # Assumption is that the input topo is in file named in_topo.json", "json import mininet.term from mininet.topo import Topo from mininet.node import OVSSwitch from mininet.node", "print >>f, out_line out_line=\"swlog_dir=\"+str(swlog_dir) print >>f, out_line out_line=\"pcap_dir=\"+str(pcap_dir) print >>f, out_line out_line=\"p4_switch_json=\"+str(switch_json) print", "worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No Need to", "if args.switch_exe : switch_exe = str(args.switch_exe) print \"Switch EXE Name ...\", switch_exe if", "dict.items( data[\"allowed_paths\"] ): my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for worker in", "dev mn_tun0 root netem delay 500ms\") # exp.get_node(\"s3\").cmd(\"tc qdisc change dev s3-eth1 root", "traceback import json import mininet.term from mininet.topo import Topo from mininet.node import OVSSwitch", "str(args.pcap_dir) print \"Pcap Dir ...\", pcap_dir if args.switch_json : switch_json = str(args.switch_json) print", "Host ...\", my_cmd print exp.get_node(host).cmd(my_cmd) raw_input(\"[Continue...]\") # print exp.get_node(\"h2\").cmd(\"python new_cmd_monitor.py --cmd_file=/tmp/h2_cmnds.txt > /tmp/h2_out", "Parser JSON File Name ...\", switch_json if args.switch_exe : switch_exe = str(args.switch_exe) print", "# Now save the Input CLI arguments in experiment.cfg file # Num workers", "worker in my_workers : \"Copying to Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\")", "my_allowed_paths.append(item) allowed_paths_len = len(my_allowed_paths) my_workers = cluster.workers() for worker in my_workers : \"Copying", "for host in hnames: for nxthost in hnames: if host != nxthost :", "= \"ping -c 1 10.0.\" + tmp_hname + \".10\" print \"Rcmd is ..\",", "# exp.get_node(\"s6\").cmd(\"tc qdisc add dev mn_tun1 root netem delay 600ms\") # exp.get_node(\"s5\").cmd(\"tc qdisc", "...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for host, my_cmd in data[\"host_cmnds\"]", "Worker 1...\", worker worker.put_file(\"experiment.cfg\", \"/tmp/experiment.cfg\") worker.put_file(\"in_topo.json\", \"/tmp/in_topo.json\") if (allowed_paths_len <= 0): print \"No", "+ \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) exp.CLI(locals(),globals()) raw_input(\"[Continue...]\") exp.stop() raw_input(\"[Continue]\")", "tmp_hname + \".10\" print \"Rcmd is ..\", rcmd print exp.get_node(host).cmd(rcmd) if (swinit_opt ==", "topology ...\" raw_input(\"[Continue...]\") for sw in my_swlist : exp.program_myswitch(sw) for host, my_cmd in", "print \"waiting 10 seconds for routing algorithms on the controller to converge\" time.sleep(10)" ]
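# Editorial note, inferred from the control flow above: with --switch_init ByApp
# the first connectivity loop bails out after a single ping per host, since the
# switch tables are only populated later by this application through
# exp.program_myswitch(). With the default AtStart, the switches are expected to
# be initialized during exp.setup(), so the first pingall can already succeed.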