ngram
listlengths
0
67.8k
[ "'0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5',", "'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether( name='horario', unique_together={('ordem',", "] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'),", "'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ]", "('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)),", "Django 2.1.5 on 2019-08-05 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4',", "('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether( name='horario', unique_together={('ordem', 'turno')},", "2.1.5 on 2019-08-05 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto 
Horário'), ('6', 'Sexto", "Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M',", "'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()),", "('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'),", "'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether( name='horario', unique_together={('ordem', 'turno')}, ),", "('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'),", "on 2019-08-05 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'),", "('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N',", "= [ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True,", "'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')],", "models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ], ),", "('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), 
('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether( name='horario',", "Generated by Django 2.1.5 on 2019-08-05 14:44 from django.db import migrations, models class", "verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto", "# Generated by Django 2.1.5 on 2019-08-05 14:44 from django.db import migrations, models", "('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations = [", "2019-08-05 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core',", "('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'),", "models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel(", "('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio',", "dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id',", "= [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3',", "Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'),", "django.db import migrations, models class 
Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations", "max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()),", "by Django 2.1.5 on 2019-08-05 14:44 from django.db import migrations, models class Migration(migrations.Migration):", "('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ],", "14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'),", "Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[", "name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo", "operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1',", "class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario',", "Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')],", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro", "<filename>core/migrations/0003_auto_20190805_1144.py # Generated by Django 2.1.5 on 2019-08-05 14:44 from django.db import migrations,", "'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', 
models.TimeField()), ('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether(", "[ ('core', '0002_auto_20190509_1508'), ] operations = [ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T',", "'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno',", "Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim',", "[ migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'),", "max_length=10)), ('hora_inicio', models.TimeField()), ('hora_fim', models.TimeField()), ], ), migrations.AlterUniqueTogether( name='horario', unique_together={('ordem', 'turno')}, ), ]", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20190509_1508'), ] operations =", "Horário'), ('6', 'Sexto Horário')], max_length=1)), ('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)),", "migrations.CreateModel( name='Horario', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2'," ]
[ "utf-8 -*- __all__ = ['mbr', 'gpt', 'common'] from . import common from .", "'gpt', 'common'] from . import common from . import mbr from . import", "['mbr', 'gpt', 'common'] from . import common from . import mbr from .", "'common'] from . import common from . import mbr from . import gpt", "-*- coding: utf-8 -*- __all__ = ['mbr', 'gpt', 'common'] from . import common", "coding: utf-8 -*- __all__ = ['mbr', 'gpt', 'common'] from . import common from", "<reponame>dariusbakunas/rawdisk # -*- coding: utf-8 -*- __all__ = ['mbr', 'gpt', 'common'] from .", "= ['mbr', 'gpt', 'common'] from . import common from . import mbr from", "# -*- coding: utf-8 -*- __all__ = ['mbr', 'gpt', 'common'] from . import", "__all__ = ['mbr', 'gpt', 'common'] from . import common from . import mbr", "-*- __all__ = ['mbr', 'gpt', 'common'] from . import common from . import" ]
[ "from .common import BaseDns # noqa: F401 from .auroradns import AuroraDns # noqa:", ".auroradns import AuroraDns # noqa: F401 from .cloudflare import CloudFlareDns # noqa: F401", "F401 from .auroradns import AuroraDns # noqa: F401 from .cloudflare import CloudFlareDns #", "noqa: F401 from .auroradns import AuroraDns # noqa: F401 from .cloudflare import CloudFlareDns", "BaseDns # noqa: F401 from .auroradns import AuroraDns # noqa: F401 from .cloudflare", "import BaseDns # noqa: F401 from .auroradns import AuroraDns # noqa: F401 from", ".common import BaseDns # noqa: F401 from .auroradns import AuroraDns # noqa: F401", "from .auroradns import AuroraDns # noqa: F401 from .cloudflare import CloudFlareDns # noqa:", "# noqa: F401 from .auroradns import AuroraDns # noqa: F401 from .cloudflare import" ]
[ "not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val = None, None ## parse", "This file defines a class that parses and represents an SBATCH submission script", "tokens = line.split()[1:] arg, val = None, None ## parse args with '--'", "tokens arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A function", "= arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A function to allow", "to allow for querying of parameters that can have multiple names, .e.g., -N,", "parameters that can have multiple names, .e.g., -N, --nodes \"\"\" for key in", "can have multiple names, .e.g., -N, --nodes \"\"\" for key in keys: try:", "else: arg, val = tokens arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self,", "{value}\" for key, value in self.args.items() ]) def parse(self): for line in open(self.path,", "multiple_key_query(self, keys): \"\"\" A function to allow for querying of parameters that can", "val = None, None ## parse args with '--' and '=' if len(tokens)", "\"\"\" class SBATCHScript: def __init__(self, path): self.path = path self.args = {} def", "args with '-' else: arg, val = tokens arg = arg.strip(\"-\") self.args[arg] =", "arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A function to", "1: arg, val = tokens[0].split('=') ## parse args with '-' else: arg, val", "'--' and '=' if len(tokens) == 1: arg, val = tokens[0].split('=') ## parse", "\"\"\" for key in keys: try: return self.args[key] except KeyError: continue raise KeyError(f\"None", "defines a class that parses and represents an SBATCH submission script \"\"\" class", "__init__(self, path): self.path = path self.args = {} def __getitem__(self, key): if type(key)", "-N, --nodes \"\"\" for key in keys: try: return self.args[key] except KeyError: continue", "val = tokens[0].split('=') ## parse args with '-' else: arg, val = tokens", "= tokens[0].split('=') ## parse 
args with '-' else: arg, val = tokens arg", "a class that parses and represents an SBATCH submission script \"\"\" class SBATCHScript:", "allow for querying of parameters that can have multiple names, .e.g., -N, --nodes", "A function to allow for querying of parameters that can have multiple names,", "'\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items() ]) def parse(self): for line", "arg, val = tokens arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys):", "key): if type(key) == list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return", "in keys: try: return self.args[key] except KeyError: continue raise KeyError(f\"None of {keys} in", "with '-' else: arg, val = tokens arg = arg.strip(\"-\") self.args[arg] = val", "continue raise KeyError(f\"None of {keys} in sbatch arguments\") if __name__ == \"__main__\": s", "submission script \"\"\" class SBATCHScript: def __init__(self, path): self.path = path self.args =", "in self.args.items() ]) def parse(self): for line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"):", "else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value in", "for line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg,", "continue tokens = line.split()[1:] arg, val = None, None ## parse args with", "class SBATCHScript: def __init__(self, path): self.path = path self.args = {} def __getitem__(self,", "self.args[key] except KeyError: continue raise KeyError(f\"None of {keys} in sbatch arguments\") if __name__", "value in self.args.items() ]) def parse(self): for line in open(self.path, 'r').readlines(): if not", "<reponame>alex-wenzel/expanse-su-estimator \"\"\" This file defines a class that parses and represents an SBATCH", "names, .e.g., -N, --nodes \"\"\" for key in keys: try: return self.args[key] except", "keys: try: return 
self.args[key] except KeyError: continue raise KeyError(f\"None of {keys} in sbatch", "of {keys} in sbatch arguments\") if __name__ == \"__main__\": s = SBATCHScript(\"test_examples/expanse_shared_example.sh\") s.parse()", "parses and represents an SBATCH submission script \"\"\" class SBATCHScript: def __init__(self, path):", "file defines a class that parses and represents an SBATCH submission script \"\"\"", "path): self.path = path self.args = {} def __getitem__(self, key): if type(key) ==", "'=' if len(tokens) == 1: arg, val = tokens[0].split('=') ## parse args with", "def __init__(self, path): self.path = path self.args = {} def __getitem__(self, key): if", "SBATCHScript: def __init__(self, path): self.path = path self.args = {} def __getitem__(self, key):", "for key in keys: try: return self.args[key] except KeyError: continue raise KeyError(f\"None of", "f\"{key}: {value}\" for key, value in self.args.items() ]) def parse(self): for line in", "if len(tokens) == 1: arg, val = tokens[0].split('=') ## parse args with '-'", "'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val = None, None", "self.path = path self.args = {} def __getitem__(self, key): if type(key) == list:", "self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items() ])", "keys): \"\"\" A function to allow for querying of parameters that can have", "have multiple names, .e.g., -N, --nodes \"\"\" for key in keys: try: return", "SBATCH submission script \"\"\" class SBATCHScript: def __init__(self, path): self.path = path self.args", "= {} def __getitem__(self, key): if type(key) == list: return self.multiple_key_query(key) else: return", "def multiple_key_query(self, keys): \"\"\" A function to allow for querying of parameters that", "= line.split()[1:] arg, val = None, None ## parse args with '--' and", "and represents an SBATCH submission script \"\"\" class SBATCHScript: def __init__(self, path): 
self.path", "for querying of parameters that can have multiple names, .e.g., -N, --nodes \"\"\"", "try: return self.args[key] except KeyError: continue raise KeyError(f\"None of {keys} in sbatch arguments\")", "except KeyError: continue raise KeyError(f\"None of {keys} in sbatch arguments\") if __name__ ==", "__getitem__(self, key): if type(key) == list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self):", "= tokens arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A", ".e.g., -N, --nodes \"\"\" for key in keys: try: return self.args[key] except KeyError:", "type(key) == list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}:", "'-' else: arg, val = tokens arg = arg.strip(\"-\") self.args[arg] = val def", "line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val", "return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key,", "None ## parse args with '--' and '=' if len(tokens) == 1: arg,", "__str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items() ]) def parse(self):", "with '--' and '=' if len(tokens) == 1: arg, val = tokens[0].split('=') ##", "{} def __getitem__(self, key): if type(key) == list: return self.multiple_key_query(key) else: return self.args[key]", "self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value", "if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val = None, None ##", "arg, val = tokens[0].split('=') ## parse args with '-' else: arg, val =", "key in keys: try: return self.args[key] except KeyError: continue raise KeyError(f\"None of {keys}", "multiple names, .e.g., -N, --nodes \"\"\" for key in keys: try: return self.args[key]", "= None, None 
## parse args with '--' and '=' if len(tokens) ==", "\"\"\" This file defines a class that parses and represents an SBATCH submission", "for key, value in self.args.items() ]) def parse(self): for line in open(self.path, 'r').readlines():", "querying of parameters that can have multiple names, .e.g., -N, --nodes \"\"\" for", "key, value in self.args.items() ]) def parse(self): for line in open(self.path, 'r').readlines(): if", "## parse args with '-' else: arg, val = tokens arg = arg.strip(\"-\")", "list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for", "line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val = None, None ## parse args", "== list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\"", "def parse(self): for line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens =", "args with '--' and '=' if len(tokens) == 1: arg, val = tokens[0].split('=')", "## parse args with '--' and '=' if len(tokens) == 1: arg, val", "line.split()[1:] arg, val = None, None ## parse args with '--' and '='", "represents an SBATCH submission script \"\"\" class SBATCHScript: def __init__(self, path): self.path =", "parse args with '--' and '=' if len(tokens) == 1: arg, val =", "KeyError(f\"None of {keys} in sbatch arguments\") if __name__ == \"__main__\": s = SBATCHScript(\"test_examples/expanse_shared_example.sh\")", "open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val = None,", "and '=' if len(tokens) == 1: arg, val = tokens[0].split('=') ## parse args", "val = tokens arg = arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\"", "that can have multiple names, .e.g., -N, --nodes \"\"\" for key in keys:", "parse(self): for line in open(self.path, 'r').readlines(): if not 
line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:]", "parse args with '-' else: arg, val = tokens arg = arg.strip(\"-\") self.args[arg]", "self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A function to allow for querying", "that parses and represents an SBATCH submission script \"\"\" class SBATCHScript: def __init__(self,", "--nodes \"\"\" for key in keys: try: return self.args[key] except KeyError: continue raise", "len(tokens) == 1: arg, val = tokens[0].split('=') ## parse args with '-' else:", "function to allow for querying of parameters that can have multiple names, .e.g.,", "return '\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items() ]) def parse(self): for", "self.args = {} def __getitem__(self, key): if type(key) == list: return self.multiple_key_query(key) else:", "path self.args = {} def __getitem__(self, key): if type(key) == list: return self.multiple_key_query(key)", "= val def multiple_key_query(self, keys): \"\"\" A function to allow for querying of", "\"\"\" A function to allow for querying of parameters that can have multiple", "== 1: arg, val = tokens[0].split('=') ## parse args with '-' else: arg,", "return self.args[key] def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items()", "return self.args[key] except KeyError: continue raise KeyError(f\"None of {keys} in sbatch arguments\") if", "raise KeyError(f\"None of {keys} in sbatch arguments\") if __name__ == \"__main__\": s =", "KeyError: continue raise KeyError(f\"None of {keys} in sbatch arguments\") if __name__ == \"__main__\":", "tokens[0].split('=') ## parse args with '-' else: arg, val = tokens arg =", "of parameters that can have multiple names, .e.g., -N, --nodes \"\"\" for key", "val def multiple_key_query(self, keys): \"\"\" A function to allow for querying of parameters", "]) def parse(self): for line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens", "an SBATCH 
submission script \"\"\" class SBATCHScript: def __init__(self, path): self.path = path", "def __str__(self): return '\\n'.join([ f\"{key}: {value}\" for key, value in self.args.items() ]) def", "in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue tokens = line.split()[1:] arg, val =", "self.args.items() ]) def parse(self): for line in open(self.path, 'r').readlines(): if not line.startswith(\"#SBATCH\"): continue", "if type(key) == list: return self.multiple_key_query(key) else: return self.args[key] def __str__(self): return '\\n'.join([", "script \"\"\" class SBATCHScript: def __init__(self, path): self.path = path self.args = {}", "def __getitem__(self, key): if type(key) == list: return self.multiple_key_query(key) else: return self.args[key] def", "None, None ## parse args with '--' and '=' if len(tokens) == 1:", "= path self.args = {} def __getitem__(self, key): if type(key) == list: return", "arg, val = None, None ## parse args with '--' and '=' if", "class that parses and represents an SBATCH submission script \"\"\" class SBATCHScript: def", "arg.strip(\"-\") self.args[arg] = val def multiple_key_query(self, keys): \"\"\" A function to allow for" ]
[]
[ "32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext", "base64 import glob, os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content", "text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\">", "= filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f: img = f.read()", "+ base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to icons here for file", "def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f:", "basePath = \"\" # set path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"):", "icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\"", "width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\"", "viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def", "xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg>", "to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+", "= \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\"", "f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set", "path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file =", "os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content = 
\"\"\" <svg", "height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str))", "xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,'", "set path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file", "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\"", "\"\" # set path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file =", "= \"\" # set path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file", "open(svg_path, \"w\") as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0", "prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to icons here for", "img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set path", "\"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\"", "# set path to icons here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file", "<image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1]", "<filename>src/doc/overrides/.icons/convert.py import base64 import glob, os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as", "f'data:image/{ext};base64,' with open(filename, 'rb') as f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8')", "\"w\") as text_file: content = \"\"\" <svg 
xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0", "in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\" image_data = image_to_data_url(png_file) write_svg_file(svg_file,image_data)", "import base64 import glob, os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file:", "content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image", "glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\" image_data = image_to_data_url(png_file) write_svg_file(svg_file,image_data) os.remove(png_file)", "as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32", "glob, os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content = \"\"\"", "xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\" viewBox=\"0 0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" />", "32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext =", "file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\" image_data = image_to_data_url(png_file)", "/> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with", "text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as", "open(filename, 'rb') as f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath =", "= f.read() 
return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to", "= f'data:image/{ext};base64,' with open(filename, 'rb') as f: img = f.read() return prefix +", "here for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\" image_data", "prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f: img = f.read() return prefix", "ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f: img =", "</svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename,", "0 32 32\"> <image width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename):", "width=\"32\" height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix", "encoded_str): with open(svg_path, \"w\") as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\"", "as f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" #", "base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to icons here for file in", "\"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb')", "image_to_data_url(filename): ext = filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f: img", "height=\"32\" xlink:href=\"{0}\" /> </svg> \"\"\" text_file.write(content.format(encoded_str)) def image_to_data_url(filename): ext = filename.split('.')[-1] prefix =", "for file in glob.glob(basePath+\"/**/**/*.png\"): png_file = file svg_file = file[0:-4]+ \".svg\" image_data =", 
"with open(svg_path, \"w\") as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"32\" height=\"32\"", "filename.split('.')[-1] prefix = f'data:image/{ext};base64,' with open(filename, 'rb') as f: img = f.read() return", "def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\"", "'rb') as f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\"", "return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to icons here", "f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath = \"\" # set path to icons", "with open(filename, 'rb') as f: img = f.read() return prefix + base64.b64encode(img).decode('utf-8') basePath", "write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content = \"\"\" <svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"", "import glob, os def write_svg_file(svg_path, encoded_str): with open(svg_path, \"w\") as text_file: content =" ]
[ "serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ):", "not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object()", "= [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\" host_pk = \"host__pk\" def get_serializer_class(self):", "PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status =", "kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\":", "serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): # instance = self.get_object() # result", "from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class =", "TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request,", "= \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'],", "*args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk),", "super().get_queryset().filter( 
host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) #", "host_pk = \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) |", "<gh_stars>0 from rest_framework.exceptions import PermissionDenied from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import", "= (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer):", "result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status", "access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data)", "return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes =", "Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed]", "from rest_framework.exceptions import PermissionDenied from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response", "# url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): # instance =", "permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", )", "IsAuthenticated from rest_framework.decorators import action from django.db.models import Q from .models import Host,", "= self.get_object() result = 
DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if", "if bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def", "from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from", "ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import", "@action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs):", "\"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def", "kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\":", "django.db.models import Q from .models import Host, Access, Job from .serializer import AccessCreateSerializer,", "= self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet,", "Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\"", "= HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes =", "job(self, request, *args, **kwargs): # instance = self.get_object() # result = DockerConnectionPool(str(instance.id), #", "AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, 
UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed,", "HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id'", "\"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class =", "methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions = {} access: Access", "serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer =", "= JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk =", "serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute(", "): raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), )", "return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet,", "# def job(self, request, *args, **kwargs): # instance = self.get_object() # result =", "| Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def 
execute(self, request,", "AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field =", "Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\"", "get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", #", "200 if bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer)", "bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self,", "import action from django.db.models import Q from .models import Host, Access, Job from", "rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from django.db.models import Q from .models", "ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self,", "if access is None: permissions = {\"full\": True} else: for kind in access.permissions_kind:", "self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin):", "serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\" host_pk", "status = 200 if bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True, methods=['get'],", "lookup_field = 'id' permission_kind = \"dh\" host_pk = \"host__pk\" def 
get_queryset(self): return super().get_queryset().filter(", "from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool", "JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet,", "= [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", ) def", "[IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self):", "\"dh\" host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True,", "400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs):", "rest_framework.decorators import action from django.db.models import Q from .models import Host, Access, Job", "UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin):", "permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset =", "\"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects", "serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed]", "**kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is None:", "(\"update\", 
\"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions')", "import PermissionDenied from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions", "access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\":", "from rest_framework.decorators import action from django.db.models import Q from .models import Host, Access,", "ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class", "= \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter(", "serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes", "def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs):", "def get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer", "= 'id' permission_kind = \"dh\" host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\"))", "PermissionDenied from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import", "@action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) 
def my_access(self, request, pk, **kwargs): permissions = {} access:", "JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field =", "from .models import Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer,", "is None: permissions = {\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind,", "status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions = {}", "super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def", "import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool from .permissions", "status = 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request,", "if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self):", "from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from django.db.models import Q from", "def job(self, request, *args, **kwargs): # instance = self.get_object() # result = DockerConnectionPool(str(instance.id),", ".models import Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer,", "permissions = {} access: Access 
= Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions", "command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status = 400 return Response(result,", "ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action", "import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects", "Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions = {\"full\": True} else: for kind", "def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'],", "pk, **kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is", "\"host__pk\" def get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return", "[IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk = \"host__pk\" def get_queryset(self): return", "AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin):", "permissions = {\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )]", "get_queryset(self): return super().get_queryset().prefetch_related('user').filter( 
host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset =", "= Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind =", "self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return", "= AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\" host_pk =", "from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated", "= Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk =", "IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id),", "**serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status = 400 return Response(result, status=status)", "IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes =", "access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions = {\"full\": True}", "None: permissions = {\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind,", "= \"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"):", "\"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if 
self.action in (\"update\", \"create\", \"partial_update\"): return", "host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, #", "Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self,", "if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance =", "permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk = \"host__pk\" def", "{} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions = {\"full\":", "import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from django.db.models import", "= {\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] =", "HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id'", "= [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk = \"host__pk\" def get_queryset(self):", "\"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self,", "raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), 
**serializer.data.get(\"args\"), ) status", "instance = self.get_object() # result = DockerConnectionPool(str(instance.id), # instance).get_job_result( # request.query_params.get(\"key\")) # return", "import Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer", "pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer", "# methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): #", "url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not", "import IsAuthenticated from rest_framework.decorators import action from django.db.models import Q from .models import", "def execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user,", "host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def", "return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects", "user=request.user).first() if access is None: permissions = {\"full\": True} else: for kind in", "Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from django.db.models import Q", "queryset = Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] 
lookup_field = 'id' permission_kind", "serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance", "self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")):", ")] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk })", "methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): # instance", "kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"),", ") status = 200 if bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True,", "\"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset", "= \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title')", "return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer):", "execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( 
request.user, IsHostOperationAllowed.get_docker_operation(", "Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions =", "serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions(", "HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin", "# serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): # instance = self.get_object() #", "@action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True)", "serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk,", "host_pk = \"host__pk\" def get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer", "= DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status =", "action from django.db.models import Q from .models import Host, Access, Job from .serializer", "class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field", "= Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions = {\"full\": True} else: for", "request.user.pk, \"host\": pk }) return Response(serializer.data) 
class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class", "'id' permission_kind = \"dh\" host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) #", "\"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], #", "serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk", "from django.db.models import Q from .models import Host, Access, Job from .serializer import", "def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset", "= Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind =", "instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200", "return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer)", "queryset = Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk", "my_access(self, request, pk, **kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if", "lookup_field = 'id' permission_kind = \"dp\" host_pk = \"host__pk\" def 
get_serializer_class(self): if self.action", "DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class", "def my_access(self, request, pk, **kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first()", "rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from", "perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer", "rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import action from django.db.models", "permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk,", "serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes =", "request, pk, **kwargs): permissions = {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access", "HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\"", "= \"dh\" host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: #", ".permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer", "kind, )] = access.able_operations(kind) 
serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk", "class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field", "Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers", "methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if", "instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status = 400 return", "}) return Response(serializer.data) class AccessViewSet(ModelViewSet, HostOperationMixin): queryset = Access.objects serializer_class = AccessReadSerializer permission_classes", "Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from", "**kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind,", "import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes", "DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'), **serializer.data.get(\"args\"), ) status = 200 if bool(result.get(\"error\")): status = 400", ".drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class 
HostViewSet(ModelViewSet, HostOperationMixin): queryset =", "serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return Response(serializer.data) class", "= 'id' permission_kind = \"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if self.action in", "(\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user)", "\"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user)", "import Q from .models import Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer,", "host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class =", "Q from .models import Host, Access, Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer,", "host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result = DockerConnectionPool(str(instance.id), instance).execute( command=serializer.data.get('command'),", "= 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk,", "for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({", "JobSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dh\" host_pk = \"host__pk\"", "[IsHostOperationAllowed] lookup_field = 'id' 
permission_kind = \"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if", "self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied()", "= {} access: Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions =", "def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\",", "import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators", "rest_framework.exceptions import PermissionDenied from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet from rest_framework.response import Response from", "permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return", "url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions = {} access: Access =", "**kwargs): # instance = self.get_object() # result = DockerConnectionPool(str(instance.id), # instance).get_job_result( # request.query_params.get(\"key\"))", "AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\"))", "serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args, **kwargs): serializer = 
self.get_serializer(data=request.data)", "queryset = Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind", "*args, **kwargs): # instance = self.get_object() # result = DockerConnectionPool(str(instance.id), # instance).get_job_result( #", "get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\",", "AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\" host_pk = \"host__pk\"", "= \"host__pk\" def get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else:", "perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer permission_classes", "return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\", serializer_class=UserAccessSerializer) def my_access(self, request, pk, **kwargs): permissions", "# @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args,", "# instance = self.get_object() # result = DockerConnectionPool(str(instance.id), # instance).get_job_result( # request.query_params.get(\"key\")) #", "Access.objects serializer_class = AccessReadSerializer permission_classes = [IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\"", "\"pk\" ignored_suffixes = (\"List\", ) def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def", "permission_classes = 
[IsHostOperationAllowed] lookup_field = 'id' permission_kind = \"dp\" host_pk = \"host__pk\" def", "HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind =", "else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class", "permission_kind = \"dh\" host_pk = \"host__pk\" def get_queryset(self): return super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO:", "'id' permission_kind = \"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if self.action in (\"update\",", "def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class = JobSerializer", "True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer", "= self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise", "Access = Access.objects.filter(host__pk=pk, user=request.user).first() if access is None: permissions = {\"full\": True} else:", "# TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer) # def job(self,", "Job from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import", "= self.get_object() # result = 
DockerConnectionPool(str(instance.id), # instance).get_job_result( # request.query_params.get(\"key\")) # return Response(result)", "AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer from .drivers import DockerConnectionPool from .permissions import", "= 200 if bool(result.get(\"error\")): status = 400 return Response(result, status=status) @action(detail=True, methods=['get'], url_path=\"my_access\",", "request, *args, **kwargs): # instance = self.get_object() # result = DockerConnectionPool(str(instance.id), # instance).get_job_result(", "HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind = \"dh\" host_pk = \"pk\" ignored_suffixes = (\"List\",", "super().get_queryset().prefetch_related('user').filter( host__pk=self.kwargs.get(\"host__pk\")).order_by('permissions') def perform_create(self, serializer): serializer.save(host_id=self.kwargs.get(\"host__pk\")) class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin): queryset = Job.objects serializer_class", "url_path=\"job\", # serializer_class=ActionSerializer) # def job(self, request, *args, **kwargs): # instance = self.get_object()", ") def get_queryset(self): return super().get_queryset().filter( Q(creator=self.request.user) | Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True,", "permission_kind = \"dp\" host_pk = \"host__pk\" def get_serializer_class(self): if self.action in (\"update\", \"create\",", "{\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind)", "request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result =", "return 
super().get_queryset().filter( host__pk=self.kwargs.get(\"host__pk\")) # TODO: # @action(detail=True, # methods=['GET'], # url_path=\"job\", # serializer_class=ActionSerializer)", "in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions,", "IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]), host__pk=self.kwargs.get(self.host_pk), kind=self.permission_kind, ): raise PermissionDenied() instance = self.get_object() result", "from .drivers import DockerConnectionPool from .permissions import IsHostOperationAllowed, HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset", "in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def get_queryset(self): return super().get_queryset().prefetch_related('user').filter(", "else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get( kind, kind, )] = access.able_operations(kind) serializer =", "Q(accesses__user=self.request.user)).distinct().order_by('title') def perform_create(self, serializer): serializer.save(creator=self.request.user) @action(detail=True, methods=['POST'], url_path=\"execute\", serializer_class=ActionSerializer) def execute(self, request, *args,", "request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) if not IsHostOperationAllowed.check_host_permissions( request.user, IsHostOperationAllowed.get_docker_operation( serializer.data[\"command\"]),", "get_serializer_class(self): if self.action in (\"update\", \"create\", \"partial_update\"): return AccessCreateSerializer else: return AccessReadSerializer def", ".serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, 
UserAccessSerializer from .drivers import DockerConnectionPool from", "HostOperationMixin class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed]", "= access.able_operations(kind) serializer = self.get_serializer({ \"permissions\": permissions, \"user\": request.user.pk, \"host\": pk }) return", "access is None: permissions = {\"full\": True} else: for kind in access.permissions_kind: permissions[access.permissions_dictionary.get(", "class HostViewSet(ModelViewSet, HostOperationMixin): queryset = Host.objects serializer_class = HostSerializer permission_classes = [IsHostOperationAllowed] permission_kind" ]
[ "settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc)", "loading the last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files", "in fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update", "fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f)", "if len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture in fixture_files: print \"", "fixture_files) fixture_files = [os.path.join(loc, f) for f in fixture_files] if len(fixture_files) > 0:", "os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f", "in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda", "os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data", "fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files", "\"Initializing Fixtures:\" for fixture in fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata',", "= os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files)", "the last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files =", "installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for", "**kwargs): \"\"\" Loads fixture data after loading the last installed app \"\"\" if", "for f in fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture", "[] for loc in 
settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc)", "os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files", "f in fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture in", "fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f in", "= [os.path.join(loc, f) for f in fixture_files] if len(fixture_files) > 0: print \"Initializing", "verbosity=0) # Update the index print 'Generating Index' management.call_command('index', 'all', flush=True, verbosity=1) post_syncdb.connect(load_data)", "post_syncdb from django.conf import settings from django.core import management import os import re", "= filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f in fixture_files]", "from django.conf import settings from django.core import management import os import re FIXTURE_RE", "import settings from django.core import management import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$')", "for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files", "fixture, verbosity=0) # Update the index print 'Generating Index' management.call_command('index', 'all', flush=True, verbosity=1)", "os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc,", "def load_data(sender, **kwargs): \"\"\" Loads fixture data after loading the last installed app", "\".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc):", "Fixtures:\" for fixture in fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata', fixture,", 
"re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data after loading the last installed", "app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc", "fixture_files = [os.path.join(loc, f) for f in fixture_files] if len(fixture_files) > 0: print", "from django.db.models.signals import post_syncdb from django.conf import settings from django.core import management import", "%s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the index print 'Generating Index'", "\"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc in", "if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files =", "(fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the index print 'Generating Index' management.call_command('index', 'all',", "settings from django.core import management import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def", "+= os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for", "FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data after loading the", "fixture data after loading the last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1]", "= re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data after loading the last", "len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture in fixture_files: print \" >>", "% (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the index print 'Generating Index' management.call_command('index',", "print \" >> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the index", "import management import os import re 
FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\"", "fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the", "filter(lambda v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f in fixture_files] if", "[os.path.join(loc, f) for f in fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\"", "+ \".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if", "loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files =", "re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data after loading", "kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc", "for fixture in fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0)", "data after loading the last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] +", "= [] for loc in settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files +=", ">> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) # Update the index print 'Generating", "after loading the last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\":", "management.call_command('loaddata', fixture, verbosity=0) # Update the index print 'Generating Index' management.call_command('index', 'all', flush=True,", "in fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture in fixture_files:", "\"\"\" Loads fixture data after loading the last installed app \"\"\" if kwargs['app'].__name__", "django.conf import settings from django.core import management import os import re FIXTURE_RE =", "loc = 
os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v: FIXTURE_RE.match(v),", "load_data(sender, **kwargs): \"\"\" Loads fixture data after loading the last installed app \"\"\"", "last installed app \"\"\" if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = []", "FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f in fixture_files] if len(fixture_files) >", "django.core import management import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs):", "fixture in fixture_files: print \" >> %s\" % (fixture) management.call_command('loaddata', fixture, verbosity=0) #", "django.db.models.signals import post_syncdb from django.conf import settings from django.core import management import os", "settings.INITIAL_FIXTURE_DIRS: loc = os.path.abspath(loc) if os.path.exists(loc): fixture_files += os.listdir(loc) fixture_files = filter(lambda v:", "print \"Initializing Fixtures:\" for fixture in fixture_files: print \" >> %s\" % (fixture)", "> 0: print \"Initializing Fixtures:\" for fixture in fixture_files: print \" >> %s\"", "from django.core import management import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender,", "Loads fixture data after loading the last installed app \"\"\" if kwargs['app'].__name__ ==", "import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture", "management import os import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads", "0: print \"Initializing Fixtures:\" for fixture in fixture_files: print \" >> %s\" %", "import post_syncdb from django.conf import settings from django.core import management import os import", "f) for f in fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\" for", "\" >> %s\" % (fixture) management.call_command('loaddata', fixture, 
verbosity=0) # Update the index print", "v: FIXTURE_RE.match(v), fixture_files) fixture_files = [os.path.join(loc, f) for f in fixture_files] if len(fixture_files)", "fixture_files] if len(fixture_files) > 0: print \"Initializing Fixtures:\" for fixture in fixture_files: print", "import re FIXTURE_RE = re.compile(r'^[^.]*.json$') def load_data(sender, **kwargs): \"\"\" Loads fixture data after", "if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS:", "== settings.INSTALLED_APPS[-1] + \".models\": fixture_files = [] for loc in settings.INITIAL_FIXTURE_DIRS: loc =" ]
[ "raising exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions", "parser that uses exceptions for error handling. \"\"\" def error(self, message): raise ArgumentParserError(message)", "\"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for error handling.", "with wrapper that makes arguments parsers raising exceptions. :author: <NAME> \"\"\" from argparse", "exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for", "\"\"\" Exceptions for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that", "arguments parsers raising exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception):", "that makes arguments parsers raising exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser", "\"\"\" Argument parser that uses exceptions for error handling. \"\"\" def error(self, message):", "Exceptions for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses", "\"\"\"\" Created on 30.06.20 Module with wrapper that makes arguments parsers raising exceptions.", "-*- \"\"\"\" Created on 30.06.20 Module with wrapper that makes arguments parsers raising", "makes arguments parsers raising exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser class", "UTF-8 -*- \"\"\"\" Created on 30.06.20 Module with wrapper that makes arguments parsers", "ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\"", "Module with wrapper that makes arguments parsers raising exceptions. :author: <NAME> \"\"\" from", "import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. 
\"\"\" pass class ExceptionsArgumentParser(ArgumentParser):", "wrapper that makes arguments parsers raising exceptions. :author: <NAME> \"\"\" from argparse import", "30.06.20 Module with wrapper that makes arguments parsers raising exceptions. :author: <NAME> \"\"\"", "parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for error", "ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for error handling. \"\"\" def error(self,", "from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\" pass", "class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for error handling. \"\"\" def", "<gh_stars>0 # -*- coding: UTF-8 -*- \"\"\"\" Created on 30.06.20 Module with wrapper", "ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser", "on 30.06.20 Module with wrapper that makes arguments parsers raising exceptions. :author: <NAME>", "for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions", "Created on 30.06.20 Module with wrapper that makes arguments parsers raising exceptions. :author:", "class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument", "# -*- coding: UTF-8 -*- \"\"\"\" Created on 30.06.20 Module with wrapper that", "\"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\"", "pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for error handling. 
\"\"\"", "coding: UTF-8 -*- \"\"\"\" Created on 30.06.20 Module with wrapper that makes arguments", ":author: <NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument", "Argument parser that uses exceptions for error handling. \"\"\" def error(self, message): raise", "argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing. \"\"\" pass class", "-*- coding: UTF-8 -*- \"\"\"\" Created on 30.06.20 Module with wrapper that makes", "<NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\" Exceptions for argument parsing.", "argument parsing. \"\"\" pass class ExceptionsArgumentParser(ArgumentParser): \"\"\" Argument parser that uses exceptions for", "parsers raising exceptions. :author: <NAME> \"\"\" from argparse import ArgumentParser class ArgumentParserError(Exception): \"\"\"" ]
[ "_data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError", "in opts: if opt == '-h': print('Usage: python -m client -d <data_to_send> -c", "sys.exit() elif opt in (\"-d\", \"--data\"): _data = arg elif opt in (\"-c\",", "<reponame>harveyspec1245/tcpClient<gh_stars>0 from asyncClient import Clients import sys import getopt if __name__ == '__main__':", "if __name__ == '__main__': _clients = 2 _data = 'Hello' try: opts, args", "from asyncClient import Clients import sys import getopt if __name__ == '__main__': _clients", "-m client -d <data_to_send> -c <num of clients>') sys.exit(2) for opt, arg in", "-d <data_to_send> -c <num of clients>') sys.exit(2) for opt, arg in opts: if", "-c <num of clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data = arg", "(\"-d\", \"--data\"): _data = arg elif opt in (\"-c\", \"--clients\"): _clients = int(arg)", "opts: if opt == '-h': print('Usage: python -m client -d <data_to_send> -c <num", "python -m client -d <data_to_send> -c <num of clients>') sys.exit() elif opt in", "__name__ == '__main__': _clients = 2 _data = 'Hello' try: opts, args =", "<num of clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data = arg elif", "= 2 _data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"])", "import sys import getopt if __name__ == '__main__': _clients = 2 _data =", "print(e) print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit(2) for", "opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage:", "[\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python -m client -d <data_to_send>", "'-h': print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit() elif", "asyncClient import Clients import sys import getopt if __name__ == '__main__': _clients =", "arg in opts: 
if opt == '-h': print('Usage: python -m client -d <data_to_send>", "import Clients import sys import getopt if __name__ == '__main__': _clients = 2", "client -d <data_to_send> -c <num of clients>') sys.exit(2) for opt, arg in opts:", "<data_to_send> -c <num of clients>') sys.exit(2) for opt, arg in opts: if opt", "<data_to_send> -c <num of clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data =", "clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data = arg elif opt in", "getopt if __name__ == '__main__': _clients = 2 _data = 'Hello' try: opts,", "Clients import sys import getopt if __name__ == '__main__': _clients = 2 _data", "= 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as", "client -d <data_to_send> -c <num of clients>') sys.exit() elif opt in (\"-d\", \"--data\"):", "of clients>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('Usage:", "for opt, arg in opts: if opt == '-h': print('Usage: python -m client", "opt, arg in opts: if opt == '-h': print('Usage: python -m client -d", "'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e:", "= getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python -m", "-d <data_to_send> -c <num of clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data", "args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python", "sys import getopt if __name__ == '__main__': _clients = 2 _data = 'Hello'", "\"--data\"): _data = arg elif opt in (\"-c\", \"--clients\"): _clients = int(arg) Clients(_clients,", "\"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python -m client -d", "python -m client -d <data_to_send> -c <num of clients>') sys.exit(2) for opt, arg", "getopt.GetoptError as e: 
print(e) print('Usage: python -m client -d <data_to_send> -c <num of", "opt in (\"-d\", \"--data\"): _data = arg elif opt in (\"-c\", \"--clients\"): _clients", "\"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python -m client -d <data_to_send> -c", "elif opt in (\"-d\", \"--data\"): _data = arg elif opt in (\"-c\", \"--clients\"):", "in (\"-d\", \"--data\"): _data = arg elif opt in (\"-c\", \"--clients\"): _clients =", "except getopt.GetoptError as e: print(e) print('Usage: python -m client -d <data_to_send> -c <num", "getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e) print('Usage: python -m client", "opt == '-h': print('Usage: python -m client -d <data_to_send> -c <num of clients>')", "2 _data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except", "print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit() elif opt", "clients>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('Usage: python", "of clients>') sys.exit() elif opt in (\"-d\", \"--data\"): _data = arg elif opt", "== '-h': print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit()", "as e: print(e) print('Usage: python -m client -d <data_to_send> -c <num of clients>')", "<num of clients>') sys.exit(2) for opt, arg in opts: if opt == '-h':", "_clients = 2 _data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\",", "if opt == '-h': print('Usage: python -m client -d <data_to_send> -c <num of", "-m client -d <data_to_send> -c <num of clients>') sys.exit() elif opt in (\"-d\",", "try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\", [\"data=\", \"clients=\"]) except getopt.GetoptError as e: print(e)", "e: print(e) print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit(2)", "sys.exit(2) for opt, arg in opts: if opt == '-h': print('Usage: python -m", "-c <num of clients>') 
sys.exit(2) for opt, arg in opts: if opt ==", "'__main__': _clients = 2 _data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:], \"hd:c:\",", "print('Usage: python -m client -d <data_to_send> -c <num of clients>') sys.exit(2) for opt,", "import getopt if __name__ == '__main__': _clients = 2 _data = 'Hello' try:", "_data = arg elif opt in (\"-c\", \"--clients\"): _clients = int(arg) Clients(_clients, _data)", "== '__main__': _clients = 2 _data = 'Hello' try: opts, args = getopt.getopt(sys.argv[1:]," ]
[ "typing import List # Definition for singly-linked list. # class ListNode: # def", "if ptA is None else ptA.next ptB = headA if ptB is None", "headB is None: return None ptA = headA ptB = headB while ptA", "= headA ptB = headB while ptA is not ptB: ptA = headB", "ptA is None else ptA.next ptB = headA if ptB is None else", "O() \"\"\" from typing import List # Definition for singly-linked list. # class", "ListNode, headB: ListNode) -> ListNode: if headA is None or headB is None:", "<filename>Max/Max_0160_20200409.py \"\"\" 160. Intersection of Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space", "is None else ptA.next ptB = headA if ptB is None else ptB.next", "= x # self.next = None class Solution: def getIntersectionNode(self, headA: ListNode, headB:", "ListNode) -> ListNode: if headA is None or headB is None: return None", "Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if headA is None", "Definition for singly-linked list. 
# class ListNode: # def __init__(self, x): # self.val", "x): # self.val = x # self.next = None class Solution: def getIntersectionNode(self,", "None class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if headA", "headA is None or headB is None: return None ptA = headA ptB", "class ListNode: # def __init__(self, x): # self.val = x # self.next =", "headB while ptA is not ptB: ptA = headB if ptA is None", "is None: return None ptA = headA ptB = headB while ptA is", "= headB if ptA is None else ptA.next ptB = headA if ptB", "# class ListNode: # def __init__(self, x): # self.val = x # self.next", "Time complexity: O() Space complexity: O() \"\"\" from typing import List # Definition", "Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\" from typing import", "of Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\" from", "Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\" from typing", "headB: ListNode) -> ListNode: if headA is None or headB is None: return", "List # Definition for singly-linked list. # class ListNode: # def __init__(self, x):", "getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if headA is None or headB", "__init__(self, x): # self.val = x # self.next = None class Solution: def", "ptA = headB if ptA is None else ptA.next ptB = headA if", "ptB: ptA = headB if ptA is None else ptA.next ptB = headA", "\"\"\" 160. Intersection of Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity:", "# self.next = None class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) ->", "None: return None ptA = headA ptB = headB while ptA is not", "singly-linked list. 
# class ListNode: # def __init__(self, x): # self.val = x", "def __init__(self, x): # self.val = x # self.next = None class Solution:", "not ptB: ptA = headB if ptA is None else ptA.next ptB =", "ListNode: if headA is None or headB is None: return None ptA =", "headB if ptA is None else ptA.next ptB = headA if ptB is", "ListNode: # def __init__(self, x): # self.val = x # self.next = None", "-> ListNode: if headA is None or headB is None: return None ptA", "list. # class ListNode: # def __init__(self, x): # self.val = x #", "160. Intersection of Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O()", "# self.val = x # self.next = None class Solution: def getIntersectionNode(self, headA:", "for singly-linked list. # class ListNode: # def __init__(self, x): # self.val =", "Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\" from typing import List", "# Definition for singly-linked list. # class ListNode: # def __init__(self, x): #", "headA: ListNode, headB: ListNode) -> ListNode: if headA is None or headB is", "None ptA = headA ptB = headB while ptA is not ptB: ptA", "None else ptA.next ptB = headA if ptB is None else ptB.next return", "Intersection of Two Linked Lists https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\"", "else ptA.next ptB = headA if ptB is None else ptB.next return ptA", "O() Space complexity: O() \"\"\" from typing import List # Definition for singly-linked", "= headB while ptA is not ptB: ptA = headB if ptA is", "ptB = headB while ptA is not ptB: ptA = headB if ptA", "ptA is not ptB: ptA = headB if ptA is None else ptA.next", "headA ptB = headB while ptA is not ptB: ptA = headB if", "Space complexity: O() \"\"\" from typing import List # Definition for singly-linked list.", "from typing import List # Definition for singly-linked list. 
# class ListNode: #", "x # self.next = None class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode)", "complexity: O() Space complexity: O() \"\"\" from typing import List # Definition for", "self.val = x # self.next = None class Solution: def getIntersectionNode(self, headA: ListNode,", "= None class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if", "# def __init__(self, x): # self.val = x # self.next = None class", "or headB is None: return None ptA = headA ptB = headB while", "None or headB is None: return None ptA = headA ptB = headB", "import List # Definition for singly-linked list. # class ListNode: # def __init__(self,", "is None or headB is None: return None ptA = headA ptB =", "while ptA is not ptB: ptA = headB if ptA is None else", "complexity: O() \"\"\" from typing import List # Definition for singly-linked list. #", "if headA is None or headB is None: return None ptA = headA", "https://leetcode.com/problems/intersection-of-two-linked-lists/ Time complexity: O() Space complexity: O() \"\"\" from typing import List #", "ptA = headA ptB = headB while ptA is not ptB: ptA =", "return None ptA = headA ptB = headB while ptA is not ptB:", "\"\"\" from typing import List # Definition for singly-linked list. # class ListNode:", "self.next = None class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:", "is not ptB: ptA = headB if ptA is None else ptA.next ptB", "class Solution: def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if headA is", "def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode: if headA is None or" ]
[ "30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin):", "class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class", "@admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page =", "[\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields", "ordering = [\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields", "list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"]", "list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\",", "import admin from . 
import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display", "\"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink)", "= [\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related =", "AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display =", "class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page =", "[\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related = [\"artist_id\",", "from . import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\",", "search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display", "\"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields =", "[\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\",", "= [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"]", "autocomplete_fields = [\"album_id\", 
\"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields", "autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin):", "= [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields = [\"reviewer_id\"] list_display = [\"reviewer_id\", \"service_name\"]", "@admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class", "list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display", "{ \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith']", "= [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields =", "\"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class", "= [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"]", "list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\",", "@admin.register(models.Reviewer) class 
ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"]", "prepopulated_fields = { \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields", "@admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink)", "list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields = [\"reviewer_id\"] list_display = [\"reviewer_id\",", "list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre)", "= [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields =", ". 
import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"]", "@admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\",", "[\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields = [\"reviewer_id\"] list_display", "class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display", "ordering = [\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related", "autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin):", "AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30", "GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields", "search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering =", "autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields =", "search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", 
\"album_id\", \"genre_id\"] autocomplete_fields =", "= ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\",", "\"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"]", "AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin):", "= [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track)", "= [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields = [\"reviewer_id\"]", "\"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields = [\"reviewer_id\"] list_display =", "\"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre)", "[\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review)", "[\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\",", "autocomplete_fields = 
[\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\",", "list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields", "= [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display", "\"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"]", "[\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class", "= { \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields =", "@admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin):", "[\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display =", "\"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class", "list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display =", "= [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display 
=", "class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album)", "= [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\",", "= [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\",", "class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"]", "list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display", "= 30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields", "[\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page", "= 30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class", "\"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"] }", "autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"]", "[\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\",", "[\"artist_id\"] 
search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering", "[\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display =", "search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering", "ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields", "@admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist)", "ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin):", "= [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"]", "= { \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields", "= [\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields =", "\"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"]", "[\"name\"] } search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class", "[\"album_id\", 
\"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear)", "[\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\",", "list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\",", "[\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist)", "30 prepopulated_fields = { \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields =", "} list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class", "= [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering =", "list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin):", "AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display =", "\"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"] }", "[\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"]", "admin from . 
import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display =", "class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30", "\"album_id\", \"reviewer_id\"] @admin.register(models.FavoriteReviewerArtist) class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"]", "django.contrib import admin from . import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"]", "\"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"]", "= [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields", "\"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display = [\"__str__\", \"album_id\", \"reviewer_id\"]", "TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"] list_display", "list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields =", "[\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields = {", "prepopulated_fields = { \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"]", "from django.contrib import admin from . 
import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields =", "autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields =", "FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin): autocomplete_fields", "= [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"]", "= [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields = {", "@admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display = [\"title\", \"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page", "class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display", "[\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\":", "models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields =", "= [\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class AlbumAdmin(admin.ModelAdmin): list_display", "= ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display =", "@admin.register(models.Track) class 
TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\",", "\"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\",", "[\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\",", "[\"name\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"name\"] } search_fields = ['name__istartswith']", "[\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields =", "= [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display =", "ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields = [\"username__istartswith\"] @admin.register(models.Album) class", "class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class", "\"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields = [\"title\"]", "= [\"title\"] @admin.register(models.Artist) class ArtistAdmin(admin.ModelAdmin): list_display = [\"name\", \"created_at\", \"created_by\"] ordering = [\"name\"]", "\"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"] @admin.register(models.AlbumLink) class AlbumLinkAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields", 
"@admin.register(models.AlbumOfTheYear) class AlbumOfTheYear(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] autocomplete_fields = [\"album_id\"] @admin.register(models.Track) class TrackAdmin(admin.ModelAdmin):", "} search_fields = ['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin):", "class FavoriteReviewerArtistAdmin(admin.ModelAdmin): autocomplete_fields = [\"artist_id\", \"reviewer_id\"] list_display = [\"artist_id\", \"reviewer_id\"] @admin.register(models.ReviewerLink) class ReviewerLinkAdmin(admin.ModelAdmin):", "['name__istartswith'] @admin.register(models.Genre) class GenreAdmin(admin.ModelAdmin): search_fields = ['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\",", "['name__istartswith'] @admin.register(models.AlbumGenre) class AlbumGenreAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\", \"genre_id\"] autocomplete_fields = [\"album_id\", \"genre_id\"]", "{ \"slug\": [\"title\"] } list_select_related = [\"artist_id\", \"created_by\"] autocomplete_fields = [\"artist_id\"] search_fields =", "import models @admin.register(models.Reviewer) class ReviewerAdmin(admin.ModelAdmin): autocomplete_fields = [\"user\"] list_display = [\"username\", \"email\"] search_fields", "class TrackAdmin(admin.ModelAdmin): list_display = [\"__str__\", \"album_id\"] @admin.register(models.Review) class ReviewAdmin(admin.ModelAdmin): autocomplete_fields = [\"album_id\", \"reviewer_id\"]", "\"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\": [\"title\"]", "\"artist_id\", \"created_at\", \"created_by\"] ordering = [\"title\"] list_per_page = 30 prepopulated_fields = { \"slug\":" ]
[ "axis=1) for i in range(n): # sample an example from X with replacement", "number of data points in dataset # Py - desired label distribution ###################################", "- desired label distribution ################################### def tweak_dist(X, y, num_labels, n, Py): shape =", "= np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift, yshift def tweak_one(X,", "label indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels = np.argmax( np.random.multinomial(1, Py,", "desired label distribution ################################### def tweak_dist(X, y, num_labels, n, Py): shape = (n,", "in range(n): # sample an example from X with replacement idx = np.random.choice(indices_by_label[labels[i]])", "sample an example from X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx]", "# Py - desired label distribution ################################### def tweak_dist(X, y, num_labels, n, Py):", "data # n - number of data points in dataset # Py -", "X[idx] yshift[i] = y[idx] return Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label,", "################################### def tweak_dist(X, y, num_labels, n, Py): shape = (n, *X.shape[1:]) Xshift =", "of data points in dataset # Py - desired label distribution ################################### def", "y[idx] return Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label, p): # create", "num_labels, n, knockout_label, p): # create Py # call down to tweak_dist Py", "- training data # n - number of data points in dataset #", "autograd import numpy as np ##################################3 # X, y - training data #", "n, knockout_label, p): # create Py # call down to tweak_dist Py =", "shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get", "(n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices for", "y, num_labels, n, Py): 
shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift =", "Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift, yshift def tweak_one(X, y, num_labels,", "for i in range(n): # sample an example from X with replacement idx", "##################################3 # X, y - training data # n - number of data", "training data # n - number of data points in dataset # Py", "call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return", "np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift, yshift def tweak_one(X, y,", "p): # create Py # call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1))", "Py): shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) #", "Py, n), axis=1) for i in range(n): # sample an example from X", "# n - number of data points in dataset # Py - desired", "nd, autograd import numpy as np ##################################3 # X, y - training data", "create Py # call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] =", "i in range(n): # sample an example from X with replacement idx =", "def tweak_one(X, y, num_labels, n, knockout_label, p): # create Py # call down", "dtype=np.int8) # get indices for each label indices_by_label = [(y==k).nonzero()[0] for k in", "tweak_one(X, y, num_labels, n, knockout_label, p): # create Py # call down to", "range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in range(n): #", "for each label indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels = np.argmax(", "= np.zeros(n, dtype=np.int8) # get indices for each label indices_by_label = [(y==k).nonzero()[0] for", "tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return tweak_dist(X, y, num_labels,", "labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in range(n): # sample", "an example from X 
with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i]", "for k in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for i", "with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift,", "as np ##################################3 # X, y - training data # n - number", "numpy as np ##################################3 # X, y - training data # n -", "replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift, yshift", "label distribution ################################### def tweak_dist(X, y, num_labels, n, Py): shape = (n, *X.shape[1:])", "np ##################################3 # X, y - training data # n - number of", "Py - desired label distribution ################################### def tweak_dist(X, y, num_labels, n, Py): shape", "mxnet as mx from mxnet import nd, autograd import numpy as np ##################################3", "indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n),", "def tweak_dist(X, y, num_labels, n, Py): shape = (n, *X.shape[1:]) Xshift = np.zeros(shape)", "yshift def tweak_one(X, y, num_labels, n, knockout_label, p): # create Py # call", "# create Py # call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label]", "y, num_labels, n, knockout_label, p): # create Py # call down to tweak_dist", "in dataset # Py - desired label distribution ################################### def tweak_dist(X, y, num_labels,", "= np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in range(n): # sample an", "= X[idx] yshift[i] = y[idx] return Xshift, yshift def tweak_one(X, y, num_labels, n,", "points in dataset # Py - desired label distribution ################################### def tweak_dist(X, y,", "each label indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels = 
np.argmax( np.random.multinomial(1,", "Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices for each label", "# sample an example from X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] =", "import mxnet as mx from mxnet import nd, autograd import numpy as np", "np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices for each label indices_by_label =", "down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return tweak_dist(X,", "Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label, p): # create Py #", "data points in dataset # Py - desired label distribution ################################### def tweak_dist(X,", "return Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label, p): # create Py", "as mx from mxnet import nd, autograd import numpy as np ##################################3 #", "X, y - training data # n - number of data points in", "dataset # Py - desired label distribution ################################### def tweak_dist(X, y, num_labels, n,", "*X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices for each", "from mxnet import nd, autograd import numpy as np ##################################3 # X, y", "= (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices", "np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in range(n): # sample an example", "idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] return Xshift, yshift def", "yshift = np.zeros(n, dtype=np.int8) # get indices for each label indices_by_label = [(y==k).nonzero()[0]", "indices for each label indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels =", "# call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py)", "mx from mxnet import nd, autograd import numpy as np 
##################################3 # X,", "n - number of data points in dataset # Py - desired label", "import nd, autograd import numpy as np ##################################3 # X, y - training", "k in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in", "tweak_dist(X, y, num_labels, n, Py): shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift", "example from X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] =", "mxnet import nd, autograd import numpy as np ##################################3 # X, y -", "# X, y - training data # n - number of data points", "n, Py): shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n, dtype=np.int8)", "np.zeros(n, dtype=np.int8) # get indices for each label indices_by_label = [(y==k).nonzero()[0] for k", "num_labels, n, Py): shape = (n, *X.shape[1:]) Xshift = np.zeros(shape) yshift = np.zeros(n,", "= [(y==k).nonzero()[0] for k in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1)", "from X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx]", "np.random.multinomial(1, Py, n), axis=1) for i in range(n): # sample an example from", "- number of data points in dataset # Py - desired label distribution", "y - training data # n - number of data points in dataset", "get indices for each label indices_by_label = [(y==k).nonzero()[0] for k in range(10)] labels", "yshift[i] = y[idx] return Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label, p):", "Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return tweak_dist(X, y, num_labels, n,", "n), axis=1) for i in range(n): # sample an example from X with", "[(y==k).nonzero()[0] for k in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for", "X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i] = X[idx] yshift[i] = y[idx] 
return", "knockout_label, p): # create Py # call down to tweak_dist Py = np.full(num_labels,", "in range(10)] labels = np.argmax( np.random.multinomial(1, Py, n), axis=1) for i in range(n):", "import numpy as np ##################################3 # X, y - training data # n", "= np.zeros(shape) yshift = np.zeros(n, dtype=np.int8) # get indices for each label indices_by_label", "range(n): # sample an example from X with replacement idx = np.random.choice(indices_by_label[labels[i]]) Xshift[i]", "distribution ################################### def tweak_dist(X, y, num_labels, n, Py): shape = (n, *X.shape[1:]) Xshift", "= np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return tweak_dist(X, y, num_labels, n, Py)", "to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p print(Py) return tweak_dist(X, y,", "Py # call down to tweak_dist Py = np.full(num_labels, (1.-p)/(num_labels-1)) Py[knockout_label] = p", "# get indices for each label indices_by_label = [(y==k).nonzero()[0] for k in range(10)]", "= y[idx] return Xshift, yshift def tweak_one(X, y, num_labels, n, knockout_label, p): #" ]
[ "AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def delete(self): pass", "class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def delete(self):", "node): self.node = node def delete(self): pass def uniqueName(self, name): return name +", "import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node", "from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node", "notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def delete(self): pass def", "__init__(self, node): self.node = node def delete(self): pass def uniqueName(self, name): return name", "direct.showbase import DirectObject from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def", "DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def", "self.node = node def delete(self): pass def uniqueName(self, name): return name + '-'", "DirectObject from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node):", "= node def delete(self): pass def uniqueName(self, name): return name + '-' +", "node def delete(self): pass def uniqueName(self, name): return name + '-' + str(self.node.this)", "delete(self): pass def 
uniqueName(self, name): return name + '-' + str(self.node.this) def enter(self):", "<filename>v1.0.0.test/toontown/hood/AnimatedProp.py from direct.showbase import DirectObject from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify =", "pass def uniqueName(self, name): return name + '-' + str(self.node.this) def enter(self): self.notify.debug('enter')", "import DirectObject from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self,", "name): return name + '-' + str(self.node.this) def enter(self): self.notify.debug('enter') def exit(self): self.notify.debug('exit')", "def delete(self): pass def uniqueName(self, name): return name + '-' + str(self.node.this) def", "= DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def delete(self): pass def uniqueName(self,", "uniqueName(self, name): return name + '-' + str(self.node.this) def enter(self): self.notify.debug('enter') def exit(self):", "DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node = node def delete(self): pass def uniqueName(self, name):", "def __init__(self, node): self.node = node def delete(self): pass def uniqueName(self, name): return", "from direct.showbase import DirectObject from direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp')", "def uniqueName(self, name): return name + '-' + str(self.node.this) def enter(self): self.notify.debug('enter') def", "direct.directnotify import DirectNotifyGlobal class AnimatedProp(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp') def __init__(self, node): self.node =" ]
[ "None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf", "= [{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res(): r = requests.Response()", "m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) ==", "def res(): r = requests.Response() def json_func(): return returned_json r.json = json_func return", "= MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key", "the remaining tests are actual requests to Microsoft API and use an api", "\"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the", "\"\"\"Tests for `deep_translator` package.\"\"\" from unittest.mock import patch import pytest import requests from", "def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual", "for `deep_translator` package.\"\"\" from unittest.mock import patch import pytest import requests from deep_translator", "is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate(", "with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual requests to", "target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror):", 
"= json_func return r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf", ") assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets():", "@patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}]", "API and use an api key # if APIkey variable is None, they", "api key # if APIkey variable is None, they are skipped APIkey =", ") == \"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\")", "MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\")", "return returned_json r.json = json_func return r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\",", "use an api key # if APIkey variable is None, they are skipped", "deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json =", "source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) == \"\".join(m2._source) assert \"\".join(m1._target)", "variable is None, they are skipped APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key", "and use an api key # if APIkey variable is None, they are", "posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is", "skipped 
APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget():", "import pytest import requests from deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests,", "actual requests to Microsoft API and use an api key # if APIkey", "# if APIkey variable is None, they are skipped APIkey = None @pytest.mark.skipif(APIkey", "= MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) == \"\".join(m2._source)", "from deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json", "@pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\")", "None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\"", "reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" )", "test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source)", "mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See", "isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException):", "provided\") def 
test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted,", "returned_json = [{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res(): r =", "r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) ==", "is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf", "target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not", "is not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" )", ") def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are", "reason=\"api_key is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 =", "= None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted =", "to Microsoft API and use an api key # if APIkey variable is", "exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See", "not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted,", "res(): r = requests.Response() def json_func(): return returned_json r.json = json_func return r", "source=\"de\", target=\"en\").translate( \"auf 
wiedersehen!\" ) == \"See you later!\" ) def test_MicrosoftAPIerror(): with", "source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual requests to Microsoft API and", "MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) == \"\".join(m2._source) assert", "mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you later!\",", "( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you later!\" ) def", "str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey,", "with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_abbreviations():", "import requests from deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def", "[{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res(): r = requests.Response() def", "not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert", "not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey", "unittest.mock import patch import pytest import requests from deep_translator import MicrosoftTranslator, exceptions #", "assert 
isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with", "returned_json r.json = json_func return r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\",", "#!/usr/bin/env python \"\"\"Tests for `deep_translator` package.\"\"\" from unittest.mock import patch import pytest import", "tests are actual requests to Microsoft API and use an api key #", "target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is", "\"en\"}]}] def res(): r = requests.Response() def json_func(): return returned_json r.json = json_func", "MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual requests to Microsoft API", "is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey,", "test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey", "posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None,", "pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual requests to Microsoft", "\"auf wiedersehen!\" ) == \"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\",", "\"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not", "APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is 
not provided\") def test_microsoft_successful_post_onetarget(): posted", "m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) == \"\".join(m2._source) assert \"\".join(m1._target) == \"\".join(m2._target)", "= MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None,", "later!\", \"to\": \"en\"}]}] def res(): r = requests.Response() def json_func(): return returned_json r.json", "MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_abbreviations(): m1 =", "test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res(): r", "they are skipped APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\")", "an api key # if APIkey variable is None, they are skipped APIkey", "None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey,", "reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\",", "# mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you", "def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey", "return r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf 
wiedersehen!\" )", "target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def", "if APIkey variable is None, they are skipped APIkey = None @pytest.mark.skipif(APIkey is", "def json_func(): return returned_json r.json = json_func return r mock_request_post.return_value = res() assert", "[{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res(): r = requests.Response() def json_func():", "is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert", "def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert", "= res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you", "r.json = json_func return r mock_request_post.return_value = res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate(", "= requests.Response() def json_func(): return returned_json r.json = json_func return r mock_request_post.return_value =", "package.\"\"\" from unittest.mock import patch import pytest import requests from deep_translator import MicrosoftTranslator,", "assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you later!\" )", "None, they are skipped APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not", "None, reason=\"api_key is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2", "are skipped APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def", "@pytest.mark.skipif(APIkey is None, reason=\"api_key is not 
provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\",", "with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is", "def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None,", "requests to Microsoft API and use an api key # if APIkey variable", "str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey,", "@pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate(", "provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\")", "request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you later!\", \"to\":", "<filename>tests/test_microsoft_trans.py #!/usr/bin/env python \"\"\"Tests for `deep_translator` package.\"\"\" from unittest.mock import patch import pytest", "test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests are actual requests", "provided\") def test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) 
assert isinstance(posted, str)", "remaining tests are actual requests to Microsoft API and use an api key", ") assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes():", "res() assert ( MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you later!\"", "def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str)", "MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key", "not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\",", "target=\"en\").translate(\"text\") # the remaining tests are actual requests to Microsoft API and use", "pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_abbreviations(): m1", "python \"\"\"Tests for `deep_translator` package.\"\"\" from unittest.mock import patch import pytest import requests", "patch import pytest import requests from deep_translator import MicrosoftTranslator, exceptions # mocked request.post", "\"See you later!\", \"to\": \"en\"}]}] def res(): r = requests.Response() def json_func(): return", "reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted = MicrosoftTranslator(api_key=APIkey, target=[\"en\", \"ru\"]).translate( \"auf wiedersehen!\"", "from unittest.mock import patch import pytest import requests from deep_translator import MicrosoftTranslator, exceptions", "def test_microsoft_successful_post_mock(mock_request_post): returned_json = 
[{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def res():", "\"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\")", "pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not", "`deep_translator` package.\"\"\" from unittest.mock import patch import pytest import requests from deep_translator import", "wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def", "key # if APIkey variable is None, they are skipped APIkey = None", "@pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\",", "target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey,", "target=\"fr\") m2 = MicrosoftTranslator(api_key=APIkey, source=\"English\", target=\"French\") assert \"\".join(m1._source) == \"\".join(m2._source) assert \"\".join(m1._target) ==", "APIkey variable is None, they are skipped APIkey = None @pytest.mark.skipif(APIkey is None,", "assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted", "\"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\": \"See you later!\", \"to\": \"en\"}]}] def", "requests from deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post):", "is None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with 
pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with", "MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\" ) == \"See you later!\" ) def test_MicrosoftAPIerror():", "is None, they are skipped APIkey = None @pytest.mark.skipif(APIkey is None, reason=\"api_key is", "Microsoft API and use an api key # if APIkey variable is None,", "None, reason=\"api_key is not provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException):", "test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is None, reason=\"api_key", "MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is", "provided\") def test_incorrect_target_attributes(): with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\") @pytest.mark.skipif(APIkey is", "requests.Response() def json_func(): return returned_json r.json = json_func return r mock_request_post.return_value = res()", "isinstance(posted, str) @pytest.mark.skipif(APIkey is None, reason=\"api_key is not provided\") def test_microsoft_successful_post_twotargets(): posted =", "test_microsoft_successful_post_onetarget(): posted = MicrosoftTranslator(api_key=APIkey, target=\"en\").translate( \"auf wiedersehen!\" ) assert isinstance(posted, str) @pytest.mark.skipif(APIkey is", "is not provided\") def test_incorrect_target_attributes(): with 
pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=APIkey, target=\"\") with pytest.raises(exceptions.ServerException): MicrosoftTranslator(api_key=\"\", target=\"nothing\")", "are actual requests to Microsoft API and use an api key # if", "import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\":", "r = requests.Response() def json_func(): return returned_json r.json = json_func return r mock_request_post.return_value", "# the remaining tests are actual requests to Microsoft API and use an", "import patch import pytest import requests from deep_translator import MicrosoftTranslator, exceptions # mocked", "you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining", "MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\") def test_microsoft_successful_post_mock(mock_request_post): returned_json = [{\"translations\": [{\"text\":", "wiedersehen!\" ) == \"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\",", "\"to\": \"en\"}]}] def res(): r = requests.Response() def json_func(): return returned_json r.json =", "== \"See you later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") #", "is None, reason=\"api_key is not provided\") def test_abbreviations(): m1 = MicrosoftTranslator(api_key=APIkey, source=\"en\", target=\"fr\")", "you later!\", \"to\": \"en\"}]}] def res(): r = requests.Response() def json_func(): return returned_json", "json_func return r mock_request_post.return_value = res() assert ( 
MicrosoftTranslator(api_key=\"an_api_key\", source=\"de\", target=\"en\").translate( \"auf wiedersehen!\"", "json_func(): return returned_json r.json = json_func return r mock_request_post.return_value = res() assert (", "pytest import requests from deep_translator import MicrosoftTranslator, exceptions # mocked request.post @patch.object(requests, \"post\")", "later!\" ) def test_MicrosoftAPIerror(): with pytest.raises(exceptions.MicrosoftAPIerror): MicrosoftTranslator(api_key=\"empty\", source=\"de\", target=\"en\").translate(\"text\") # the remaining tests" ]
[ "build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir", "\"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"],", "= build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None: axle_dir", "axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir", "Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path,", "= Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" )", "f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir,", "\"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some", "directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't compile # set", "\"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir", "Tuple[Path, Path]: tool_src_dir = 
download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir /", "kind of config change to the axle target, such as adding new files", "cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\"", "\"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir", "-> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir", "/ \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir", "doesn't compile # set -e # make all if __name__ == \"__main__\": build()", "\"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\"", "automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir)", "\"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir", "script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle", "# You may see an error like the following while 
running this script:", "= newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"],", "/ \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir /", "/ \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path =", "def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name", "clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir", "Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path", "arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\"", "autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\",", "No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib", "= url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build()", 
"axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir =", "[autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir /", "toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as", "import tempfile from pathlib import Path from typing import Tuple from build_utils import", ") run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" / \"newlib\"", "= clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"],", ") autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir)", "have to run this command # You may see an error like the", "\"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix())", "run this command # You may see an error like the following while", "autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check(", "/ \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" 
run_and_check( [newlib_configure_path.as_posix(),", "import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir =", "\"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory()", "run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make", "import os import tempfile from pathlib import Path from typing import Tuple from", "url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() ->", "Fail when newlib doesn't compile # set -e # make all if __name__", "--prefix=/usr --target=i686-axle # Fail when newlib doesn't compile # set -e # make", "/ \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env =", "#!/usr/bin/python3 import os import tempfile from pathlib import Path from typing import Tuple", "= clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"],", "if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir /", "with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if", "tool_src_dir, tool_build_dir def build() -> None: axle_dir = Path(__file__).parent 
sysroot_dir = axle_dir /", "tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True)", "/ \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir /", "such as adding new files within the newlib port, # you may have", "If you make some kind of config change to the axle target, such", "\"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path =", "(newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir", "\"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\",", "automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(),", "change to the axle target, such as adding new files within the newlib", "\"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env)", "tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None: axle_dir = 
Path(__file__).parent sysroot_dir =", "target, such as adding new files within the newlib port, # you may", "binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir", "\"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix())", "running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure", "--target=i686-axle # Fail when newlib doesn't compile # set -e # make all", "\"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(),", "{\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env,", "while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory #", "or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't compile #", "\"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir =", "= Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir", "url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir", "\"--prefix=/usr\", \"--target=i686-axle\"], 
cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir,", "/ \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir /", "/ \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(),", "newlib_src_dir = axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir /", "You may see an error like the following while running this script: #", "f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\"", "\"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"],", "f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None: axle_dir = Path(__file__).parent sysroot_dir", "Path, url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\")", "run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some kind of config", "from typing import Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url:", "autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, 
\"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(),", "axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir =", ") run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you", "<reponame>codyd51/axle<filename>build_newlib.py #!/usr/bin/python3 import os import tempfile from pathlib import Path from typing import", "build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir )", "\"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir /", "= build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir /", "you may have to run this command # You may see an error", "None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir", "os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir", "[newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", 
\"install\"],", "= Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir(", "tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None:", "the axle target, such as adding new files within the newlib port, #", "(newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'}", "= axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw:", "the newlib port, # you may have to run this command # You", "env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If", "(newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir", "as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir,", "the following while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or", "/ \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir /", "automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], 
cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\",", "/ \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir /", "/ \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir /", "/ \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env)", "os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir", "env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some kind of", "within the newlib port, # you may have to run this command #", "as adding new files within the newlib port, # you may have to", "= download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return", "str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir =", "build() -> None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target =", "may have to run this command # You may see an error like", "build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, 
\"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\"", "newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\",", "tool_build_dir def build() -> None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\"", "\"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\":", "\"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir /", "new files within the newlib port, # you may have to run this", "= autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"],", "sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\"", "(newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir", "tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def", "\"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix())", "/ \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), 
(newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir /", "Path from typing import Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path,", "return tool_src_dir, tool_build_dir def build() -> None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir", "/bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail", "this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr", "newlib doesn't compile # set -e # make all if __name__ == \"__main__\":", "Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir,", "autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir)", "= axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir", "typing import Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str)", "run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir", "../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't compile # set -e #", ") automake_configure_path = automake_src_dir / \"configure\" 
run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir)", "# If you make some kind of config change to the axle target,", "import Path from typing import Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir:", "\"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir =", "run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir =", "cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\"", "newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir", "some kind of config change to the axle target, such as adding new", "(newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check(", "\"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw)", "\"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\",", "file or directory 
# ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't compile", "axle target, such as adding new files within the newlib port, # you", "\"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(),", "port, # you may have to run this command # You may see", "/ \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir", "this command # You may see an error like the following while running", "make some kind of config change to the axle target, such as adding", "of config change to the axle target, such as adding new files within", "# Fail when newlib doesn't compile # set -e # make all if", "cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) #", "automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check(", "run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir", "= \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with", "env_additions=env) # If you make some kind of config change to the axle", "to run this command # You may see an error like the following", "os import tempfile from pathlib import Path from typing import Tuple from build_utils", "# /bin/sh: 
/Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle #", "= toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir =", "/ \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir", "cwd=autoconf_build_dir ) run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" /", "os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir", "an error like the following while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No", "download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir,", "clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir", "download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir,", "pathlib import Path from typing import Tuple from build_utils import download_and_unpack_archive, run_and_check def", "run_and_check( 
[newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\",", "\"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir /", "build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir / \"configure\" run_and_check( [autoconf_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=autoconf_build_dir )", "files within the newlib port, # you may have to run this command", "error like the following while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such", "cwd=newlib_build_dir, env_additions=env) run_and_check([\"make\", f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some kind", "/ \"i686-toolchain\" binaries_dir = toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir =", "url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir", "/ \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir / \"i686-toolchain\" binaries_dir = toolchain_dir", "\"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some kind of config change to", "like the following while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file", "axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\" newlib_build_dir.mkdir()", "cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) 
newlib_src_dir = axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\"", "# ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't compile # set -e", "from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]:", "run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path", "/ \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir,", "def build() -> None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target", "os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir", "cwd=newlib_build_dir, env_additions=env) # If you make some kind of config change to the", "cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path =", "import Tuple from build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) ->", "from pathlib import Path from typing import Tuple from build_utils import download_and_unpack_archive, run_and_check", "run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = 
clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" ) autoconf_configure_path = autoconf_src_dir", "build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False: automake_src_dir, automake_build_dir =", "False: automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz\" ) automake_configure_path = automake_src_dir / \"configure\"", "newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix())", "\"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix())", "f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, env_additions=env, )", "-> None: axle_dir = Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\"", "[automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir(", "clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name =", "Path(__file__).parent sysroot_dir = axle_dir / \"axle-sysroot\" arch_target = \"i686-elf\" toolchain_dir = axle_dir /", "see an error like the following while running this script: # /bin/sh: 
/Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure:", "following while running this script: # /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory", "/Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when", "\"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\"", "toolchain_dir / \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent", "build_utils import download_and_unpack_archive, run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir", "run_and_check def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]: tool_src_dir = download_and_unpack_archive(build_dir, url)", "/ f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None: axle_dir = Path(__file__).parent", "build_dir / \"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(),", "tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent / \"newlib-build-products\" if False:", "adding new files within the newlib port, # you may have to run", "\"build-newlib\" newlib_build_dir.mkdir() os.symlink((binaries_dir / \"i686-elf-ar\").as_posix(), (newlib_build_dir / \"i686-axle-ar\").as_posix()) os.symlink((binaries_dir / \"i686-elf-as\").as_posix(), (newlib_build_dir /", 
"cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir", "f\"DESTDIR={sysroot_dir.as_posix()}\", \"install\"], cwd=newlib_build_dir, env_additions=env) # If you make some kind of config change", "may see an error like the following while running this script: # /bin/sh:", "build_dir / f\"build-{tool_name}\" tool_build_dir.mkdir(exist_ok=True) return tool_src_dir, tool_build_dir def build() -> None: axle_dir =", "run_and_check([\"make\"], cwd=autoconf_build_dir) run_and_check([\"make\", \"install\"], cwd=autoconf_build_dir) newlib_src_dir = axle_dir / \"ports\" / \"newlib\" /", "when newlib doesn't compile # set -e # make all if __name__ ==", "/ \"bin\" with tempfile.TemporaryDirectory() as build_dir_raw: build_dir = Path(build_dir_raw) build_products_dir = Path(__file__).parent /", "such file or directory # ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle # Fail when newlib doesn't", "Path]: tool_src_dir = download_and_unpack_archive(build_dir, url) tool_name = url.split(\"/\")[-1].removesuffix(\".tar.gz\") tool_build_dir = build_dir / f\"build-{tool_name}\"", ") run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"], cwd=automake_build_dir) autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir( build_dir, \"https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz\" )", "/ \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path", "tempfile from pathlib import Path from typing import Tuple from build_utils import download_and_unpack_archive,", "you make some kind of config change to the axle target, such as", "newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir, 
env_additions=env, ) run_and_check([\"make\", \"all\"], cwd=newlib_build_dir,", "\"i686-elf-as\").as_posix(), (newlib_build_dir / \"i686-axle-as\").as_posix()) os.symlink((binaries_dir / \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(),", "config change to the axle target, such as adding new files within the", "command # You may see an error like the following while running this", "= axle_dir / \"ports\" / \"newlib\" / \"newlib-2.5.0.20171222\" newlib_build_dir = build_dir / \"build-newlib\"", "to the axle target, such as adding new files within the newlib port,", "env = {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"],", "/ \"i686-elf-gcc\").as_posix(), (newlib_build_dir / \"i686-axle-gcc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir /", "= {\"PATH\": f'{newlib_build_dir}:{os.environ[\"PATH\"]}'} newlib_configure_path = newlib_src_dir / \"configure\" run_and_check( [newlib_configure_path.as_posix(), \"--prefix=/usr\", \"--target=i686-axle\"], cwd=newlib_build_dir,", "# you may have to run this command # You may see an", "newlib port, # you may have to run this command # You may", "os.symlink((binaries_dir / \"i686-elf-cc\").as_posix(), (newlib_build_dir / \"i686-axle-cc\").as_posix()) os.symlink((binaries_dir / \"i686-elf-ranlib\").as_posix(), (newlib_build_dir / \"i686-axle-ranlib\").as_posix()) env", "= automake_src_dir / \"configure\" run_and_check( [automake_configure_path.as_posix(), f\"--prefix={build_products_dir}\"], cwd=automake_build_dir ) run_and_check([\"make\"], cwd=automake_build_dir) run_and_check([\"make\", \"install\"]," ]
[]
[ "to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str):", "str: \"\"\" The IPv4 address of the WAN. \"\"\" return pulumi.get(self, \"wan_ip\") @property", "quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str:", "associate the network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) ->", "Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies", "dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\")", "def wan_netmask(self) -> str: \"\"\" The IPv4 netmask of the WAN. \"\"\" return", "be set to true if you have dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return", "@property @pulumi.getter def name(self) -> str: \"\"\" The name of the network. \"\"\"", "@pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain name of this network. 
\"\"\"", "raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name", "list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument", "\"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to", "TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and", "return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP", "data source can be used to retrieve settings for a network by name", "isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type)", "The IPv4 address where the DHCP range of addresses stops. \"\"\" return pulumi.get(self,", "address of a TFTP server to network boot from. \"\"\" return pulumi.get(self, \"dhcpd_boot_server\")", "and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__,", "TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and", "\"network_group\", network_group) if purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to", "and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__,", "`WAN_LTE_FAILOVER`. 
\"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies", "@property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN username. \"\"\"", "if id and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a", "\"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface is", "pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN ID of", "network by name or ID. ## Example Usage ```python import pulumi import pulumi_unifi", "domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be a str\")", "isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id)", "ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None,", "@pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4 address of the WAN. \"\"\"", "to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str):", "the WAN network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return", "raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface", "set to true if you have dhcpd*boot*filename, and dhcpd*boot*server set. 
\"\"\" return pulumi.get(self,", "and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__,", "pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username'", "is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__", "-> str: \"\"\" The domain name of this network. \"\"\" return pulumi.get(self, \"domain_name\")", "network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\"", "@property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of the WAN.", "unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of the network. :param str name:", "int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if", "-> str: \"\"\" The IPv4 netmask of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\")", "not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\",", "\"\"\" The name of the network. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def", "return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\" The name of", "isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop)", "x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str] = None, site: Optional[str] =", "settings for a network by name or ID. 
## Example Usage ```python import", "\"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which", "address where the DHCP range of addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property", "pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group'", "opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version()", "`WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str:", "## Example Usage ```python import pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\")", "name of the site to associate the network with. \"\"\" return pulumi.get(self, \"site\")", "argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not", "\"\"\" The IPv4 address where the DHCP range of addresses starts. \"\"\" return", "wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]]", "x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]]", "Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless", "static IPv6 subnet (when ipv6*interface*type is 'static'). 
\"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter", "my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of the network. :param", "\"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to", "not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\",", "bool: \"\"\" whether DHCP is enabled or not on this network. \"\"\" return", "\"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None,", "and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__,", "network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease", "pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\" The ID of the", "\"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled or", "= None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\"", "IPv4 address of the WAN. \"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self)", "getNetwork. 
\"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None,", "isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable)", "@pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the file to PXE boot from on", "None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url", "@property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4 gateway of the WAN.", "with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\" The", "netmask of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) ->", "of the site to associate the network with. \"\"\" __args__ = dict() __args__['id']", "isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename)", "range of addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) ->", "network_group(self) -> str: \"\"\" The group of the network. \"\"\" return pulumi.get(self, \"network_group\")", "@pulumi.getter def id(self) -> str: \"\"\" The ID of the network. \"\"\" return", "starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The", "to retrieve settings for a network by name or ID. ## Example Usage", "the Pulumi Terraform Bridge (tfgen) Tool. 
*** # *** Do not edit by", "a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected", "and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__,", "\"\"\" The subnet of the network (CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property", "be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise", "to be returned from the DHCP server. \"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\")", "wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be a str\")", ":param str site: The name of the site to associate the network with.", "a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose, str): raise TypeError(\"Expected", "vlan_id(self) -> int: \"\"\" The VLAN ID of the network. \"\"\" return pulumi.get(self,", "\"\"\" The purpose of the network. One of `corporate`, `guest`, `wan`, or `vlan-only`.", "*** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional,", "you are doing! *** import warnings import pulumi import pulumi.runtime from typing import", "'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name,", "to PXE boot from on the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\")", "return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4 address", "Bridge (tfgen) Tool. 
*** # *** Do not edit by hand unless you're", "to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int):", "\"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN username.", "'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet,", "to network boot from. \"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) ->", "\"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to", "not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\",", "IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str:", "not on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) ->", "-> str: \"\"\" The purpose of the network. 
One of `corporate`, `guest`, `wan`,", "\"purpose\", purpose) if site and not isinstance(site, str): raise TypeError(\"Expected argument 'site' to", "argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not", "coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge", "list): raise TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if", "pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease,", "'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id,", "igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None,", "Specifies the WAN network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\"", "either `disabled`, `static`, `dhcp`, or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def", "of the network. One of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self,", "\"\"\" The domain name of this network. \"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter", "enabled or not. 
\"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str:", "ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type is 'static').", "pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type'", "not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\",", "One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\")", "set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the", "unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of the network.", "raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server", "def purpose(self) -> str: \"\"\" The purpose of the network. One of `corporate`,", "pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface", "isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group)", "domain name of this network. 
\"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self)", "bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if", "@property @pulumi.getter def subnet(self) -> str: \"\"\" The subnet of the network (CIDR", "Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = [ 'GetNetworkResult',", "dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid,", "dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose,", "= pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None:", "None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be", "ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None,", "Specifies the IPv6 Prefix ID. 
\"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self)", "site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network)", "vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and", "str\") pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument", "TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and", "return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to", "'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns,", "wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name:", "domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None,", "a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected", "@property 
@pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable router advertisements", "which type of IPv6 connection to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\")", "get_network(id: Optional[str] = None, name: Optional[str] = None, site: Optional[str] = None, opts:", "of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def", "doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping,", "str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument", "The name of the site to associate the network with. \"\"\" __args__ =", "`wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str:", "name of the site to associate the network with. \"\"\" __args__ = dict()", "opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return", "pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup'", "\"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group", "and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__,", "pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which type of", "from the DHCP server. 
\"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) ->", "'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of values returned by", "isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos)", "'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type,", "ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None,", "Specifies whether IGMP snooping is enabled or not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property", "@property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN connection type.", "the network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str:", "dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet,", "a network by name or ID. ## Example Usage ```python import pulumi import", "PXE boot from on the dhcpd*boot*server. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def", "if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a", "pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for", "'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable,", "not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\",", "be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise", "a str\") pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group, str): raise TypeError(\"Expected", "if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a", "isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid)", "\"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the file to PXE boot", "pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of", "not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\",", "import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence,", "@pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the DNS server to", "`static`, `dhcp`, or `pppoe`. 
\"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) ->", "a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected", "-> bool: \"\"\" Specifies whether IGMP snooping is enabled or not. \"\"\" return", "wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not", "VLAN ID of the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self)", "@property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress quality of", "and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__,", "One of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter", "not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\",", "raise TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup", "'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose,", "\"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to", "IPv6 connection to use. 
\"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) ->", "wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str] = None, site: Optional[str]", "be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id, str): raise", "dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group,", "\"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to", "group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property", "of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str:", "username. \"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies", "-> bool: \"\"\" Specifies whether to enable router advertisements or not. \"\"\" return", "DHCP server. 
\"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\"", "dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet,", "def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface is used for IPv6", "subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns", "@property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping is enabled", "str): raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\", id) if", "ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\")", "\"\"\" Specifies the WAN network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`.", "know what you are doing! 
*** import warnings import pulumi import pulumi.runtime from", "bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if", "str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses", "\"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\" The purpose of the network.", "int): raise TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if", "__all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A", "@pulumi.output_type class GetNetworkResult: \"\"\" A collection of values returned by getNetwork. \"\"\" def", "bool: \"\"\" Specifies whether IGMP snooping is enabled or not. \"\"\" return pulumi.get(self,", "by name or ID. ## Example Usage ```python import pulumi import pulumi_unifi as", "'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username,", "\"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers", "bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument", "boot from on the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self)", "to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str):", "str: \"\"\" The name of the network. 
\"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\")", "ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup,", "dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be", "be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise", "`guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) ->", "raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name", "you have dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def", "either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self)", "\"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which type of IPv6", "str\") pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument", "name of the network. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) ->", "\"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to", "ID. 
\"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies", "network. :param str name: The name of the network. :param str site: The", "and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__,", "__args__['id'] = id __args__['name'] = name __args__['site'] = site if opts is None:", "to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool):", "str: \"\"\" The name of the site to associate the network with. \"\"\"", "\"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to", "a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected", "str): raise TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet) if", "retrieve settings for a network by name or ID. ## Example Usage ```python", "generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** # *** Do not", "pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\" The subnet of the", "str): raise TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group) if", "isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose)", "if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be a", "None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ =", "TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and", "\"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return", "the network. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\"", "The IPv4 netmask of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def", "WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies", "@property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain name of this network.", "pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP", "GetNetworkResult: \"\"\" A collection of values returned by getNetwork. 
\"\"\" def __init__(__self__, dhcp_dns=None,", "dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None,", "site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data", "be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise", "str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if", "not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\",", "argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not", "and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__,", "and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__,", "ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be", "str: \"\"\" The IPv4 netmask of the WAN. 
\"\"\" return pulumi.get(self, \"wan_netmask\") @property", "opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is", "not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\",", "Optional[str] = None, name: Optional[str] = None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions]", "\"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to", "wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to", "group of the network. \"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) ->", "and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__,", "return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4 gateway", "raise TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled", "Specifies which type of IPv6 connection to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property", "the network. :param str site: The name of the site to associate the", "@property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4 address where the DHCP", "of either `disabled`, `static`, `dhcp`, or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\")", "The subnet of the network (CIDR address). 
\"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\")", "\"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4 address where the", "str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument", "the network. \"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\"", "Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can be used to", "isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask)", "-> str: \"\"\" The name of the site to associate the network with.", "and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__,", "if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a", "@pulumi.getter def purpose(self) -> str: \"\"\" The purpose of the network. One of", "a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected", "if subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be a", "gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) ->", "server to network boot from. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self)", "TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and", "\"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4", "dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\")", "AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type,", "return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is", "Optional, Sequence, Union, overload from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult',", "@property @pulumi.getter def site(self) -> str: \"\"\" The name of the site to", "the network. :param str name: The name of the network. 
:param str site:", "be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose, str): raise", "pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping'", "wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site:", "\"\"\" Specifies which WAN interface is used for IPv6 Prefix Delegation. \"\"\" return", "ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway,", "igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping is enabled or not. \"\"\"", "Specifies the WAN egress quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\")", "dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None,", "`Network` data source can be used to retrieve settings for a network by", "The name of the network. 
:param str site: The name of the site", "subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id:", "isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface)", "DNS servers IPs of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def", "raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\", name) if network_group", "return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4 netmask", "file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # ***", "def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable router advertisements or not.", "the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\"", "\"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which", "purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be a str\")", "not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\",", "dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP boot options. will be set", "-> int: \"\"\" lease time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property", "true if you have dhcpd*boot*filename, and dhcpd*boot*server set. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property", "'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop,", "def wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN username. \"\"\" return pulumi.get(self,", "TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def", "IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def", "import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ =", "if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value", "pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4 address where", "of the network (CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self)", "wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network group. One of either `WAN`,", "purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password)", "pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid'", "-> int: \"\"\" The VLAN ID of the network. 
\"\"\" return pulumi.get(self, \"vlan_id\")", "TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and", ":param str id: The ID of the network. :param str name: The name", "be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\"", "wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str] =", "a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected", "\"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the", "and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__,", "\"\"\" A collection of values returned by getNetwork. 
\"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None,", "@property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface is used", "dhcp_stop(self) -> str: \"\"\" The IPv4 address where the DHCP range of addresses", "pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4 address of", "ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos,", "if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be a", "@pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type", "wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be", "\"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\" The ID of the network.", "of IPv6 connection to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self)", "server. \"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether", "= unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of the network. 
:param str", "'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start,", "raise TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip", "\"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the", "not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\",", "pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str): raise TypeError(\"Expected argument 'name'", "str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument", "and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__,", "bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument", "dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None,", "be used to retrieve settings for a network by name or ID. 
##", "argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not", "raise TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id", "TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and", "argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not", "TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and", "snooping is enabled or not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self)", "a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected", "pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop'", "The IPv4 gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def", "\"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to", "pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask'", "isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease)", "def vlan_id(self) -> int: \"\"\" The VLAN ID of the network. \"\"\" return", "IPV4 WAN username. 
\"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str:", "\"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the", "int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument", "dhcp_start(self) -> str: \"\"\" The IPv4 address where the DHCP range of addresses", "\"\"\" The IPv4 address where the DHCP range of addresses stops. \"\"\" return", "__ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop,", "a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected", "return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group of", "str): raise TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if", "dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be", "pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway'", "@property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN password. 
\"\"\"", "pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network", "to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]:", "str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument", "for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str:", "class GetNetworkResult: \"\"\" A collection of values returned by getNetwork. \"\"\" def __init__(__self__,", "TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and", "\"site\", site) if subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to", "argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not", "to be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool):", "@pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of the WAN. \"\"\"", "a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4", "from on the dhcpd*boot*server. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) ->", "\"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix ID.", "pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site, str): raise TypeError(\"Expected argument 'site'", "be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise", "\"\"\" lease time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def", "you're certain you know what you are doing! *** import warnings import pulumi", "\"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain name of this", "be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise", "name(self) -> str: \"\"\" The name of the network. \"\"\" return pulumi.get(self, \"name\")", "@pulumi.getter def name(self) -> str: \"\"\" The name of the network. \"\"\" return", "is enabled or not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) ->", "and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__,", "str: \"\"\" Specifies the IPV4 WAN username. \"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\")", "def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping is enabled or not.", "The name of the site to associate the network with. 
\"\"\" return pulumi.get(self,", "dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None,", "-> str: \"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\"", "to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str):", "a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected", "-> str: \"\"\" The IPv4 address of the WAN. \"\"\" return pulumi.get(self, \"wan_ip\")", "str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument", "Sequence, Union, overload from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network',", "\"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the", "WAN interface is used for IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property", "wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]]", "WAN username. 
\"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\"", "ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask,", "the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\"", "int: \"\"\" Specifies the WAN egress quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\")", "of the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]:", "-> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used to retrieve settings for", "or not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\"", "str: \"\"\" Specifies the WAN network group. One of either `WAN`, `WAN2` or", "return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs", "domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet,", "be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise", "The domain name of this network. 
\"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def", "is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\"", "__args__ = dict() __args__['id'] = id __args__['name'] = name __args__['site'] = site if", "IGMP snooping is enabled or not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def", "dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None,", "network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be a str\")", "to enable router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def", "from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities", "\"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to", "WAN connection type. One of either `disabled`, `static`, `dhcp`, or `pppoe`. 
\"\"\" return", "pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface'", "@property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4 address where the DHCP", "TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and", "TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and", "*** Do not edit by hand unless you're certain you know what you", "\"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to", "source can be used to retrieve settings for a network by name or", "`corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self)", "dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be", "if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be a", "DHCP boot options. will be set to true if you have dhcpd*boot*filename, and", "subnet(self) -> str: \"\"\" The subnet of the network (CIDR address). \"\"\" return", "boot from. \"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\"", "@pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4 address where the DHCP range", "DHCP is enabled or not on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property", "TFTP server to network boot from. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def", "if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename,", "-> str: \"\"\" The IPv4 address where the DHCP range of addresses stops.", "'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid,", "site if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version", "int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument", "or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\"", "id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be", "WAN egress quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self)", "if you have dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\")", "subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def", "-> str: \"\"\" The subnet of the network (CIDR address). 
\"\"\" return pulumi.get(self,", "class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult(", "not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\",", "to be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int):", "@_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] =", "network. \"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\" The", "wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument", "def wan_ip(self) -> str: \"\"\" The IPv4 address of the WAN. \"\"\" return", "-> bool: \"\"\" Toggles on the DHCP boot options. will be set to", "if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be a", "return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the IPV4", "and not isinstance(site, str): raise TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__,", "pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename'", "the DHCP range of addresses starts. 
\"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def", "be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise", "and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__,", "site(self) -> str: \"\"\" The name of the site to associate the network", "name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) ->", "str: \"\"\" Specifies the IPV4 WAN connection type. One of either `disabled`, `static`,", "and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__,", "raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type", "A collection of values returned by getNetwork. \"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None,", "the WAN. \"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\"", "str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str): raise TypeError(\"Expected argument", "The ID of the network. \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self)", "def dhcp_start(self) -> str: \"\"\" The IPv4 address where the DHCP range of", "raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping", "@pulumi.getter def site(self) -> str: \"\"\" The name of the site to associate", "dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be a list\")", "str name: The name of the network. 
:param str site: The name of", "argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not", "name and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a str\")", "with. \"\"\" __args__ = dict() __args__['id'] = id __args__['name'] = name __args__['site'] =", "None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can be", "list): raise TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if", "to be a str\") pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group, str):", "```python import pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\")", "ipv6_interface_type(self) -> str: \"\"\" Specifies which type of IPv6 connection to use. \"\"\"", "def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix ID. \"\"\" return pulumi.get(self,", "WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self):", "if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version =", "isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name)", "def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the DNS server to be", "\"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable router", "by the Pulumi Terraform Bridge (tfgen) Tool. 
*** # *** Do not edit", "raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type", "argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not", "pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping", "return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping,", "pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename,", "_utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\"", "\"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group of the network.", "\"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on", "network (CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int:", "\"\"\" Specifies the IPV4 WAN username. 
\"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def", "isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password)", "on the DHCP boot options. will be set to true if you have", "The IPv4 address of the WAN. \"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def", "Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand", "__args__['name'] = name __args__['site'] = site if opts is None: opts = pulumi.InvokeOptions()", "ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\")", "IPv4 gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self)", "pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network =", "dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None,", "if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be a", "# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform", "isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start)", "if purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be a", "str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument", "@property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" 
the file to PXE boot from", "-> AwaitableGetNetworkResult: \"\"\" `Network` data source can be used to retrieve settings for", "vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id:", "be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str): raise", "id __args__['name'] = name __args__['site'] = site if opts is None: opts =", "None, name: Optional[str] = None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None)", "Union, overload from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output',", "be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise", "the network with. \"\"\" __args__ = dict() __args__['id'] = id __args__['name'] = name", "__args__['site'] = site if opts is None: opts = pulumi.InvokeOptions() if opts.version is", "'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos,", "@pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable router advertisements or", "a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected", "pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self", "ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable router advertisements or not. 
\"\"\"", "import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .", "'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface,", "of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool:", "str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument", "\"\"\" The VLAN ID of the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\")", "of values returned by getNetwork. \"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None,", "ipv6_static_subnet) if name and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be", "Toggles on the DHCP boot options. will be set to true if you", "wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list):", "dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface,", "wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\")", "of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int:", "network. 
\"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\" The", "str: \"\"\" The IPv4 address where the DHCP range of addresses starts. \"\"\"", "for IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) ->", "dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None,", "-> str: \"\"\" The IPv4 gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\")", "and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__,", "be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise", "name: The name of the network. :param str site: The name of the", "be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise", "def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of the WAN. 
\"\"\" return", "wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise", "to be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose, str):", "raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet", "dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name,", "not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\",", "None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network`", "a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected", "\"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\" The name", "pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\" The name of the", "'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of values returned", "@property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network group. 
One", "if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a", "dhcpd_boot_filename(self) -> str: \"\"\" the file to PXE boot from on the dhcpd*boot*server.", "'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease,", "return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6", "to associate the network with. \"\"\" __args__ = dict() __args__['id'] = id __args__['name']", "argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not", "argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not", "the static IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property", "def dhcp_lease(self) -> int: \"\"\" lease time for DHCP addresses. \"\"\" return pulumi.get(self,", "the IPV4 WAN username. \"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) ->", "str id: The ID of the network. :param str name: The name of", "time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) ->", "TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__, \"site\", site) if subnet and", "which WAN interface is used for IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\")", "IPv4 address where the DHCP range of addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\")", "associate the network with. 
\"\"\" __args__ = dict() __args__['id'] = id __args__['name'] =", "raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease", "if site and not isinstance(site, str): raise TypeError(\"Expected argument 'site' to be a", "dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name,", "wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be", "subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be a str\")", "\"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to", "\"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to", "ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str:", "str): raise TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if", "whether IGMP snooping is enabled or not. 
\"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\")", "TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and", "return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN ID", "_utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start,", "to be a str\") pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet, str):", "str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument", "hand unless you're certain you know what you are doing! *** import warnings", "to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str):", "ID of the network. :param str name: The name of the network. 
:param", "not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\",", "TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and", "Usage ```python import pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device =", "if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a", "isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway)", "raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename", "pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled", "\"\"\" IPv4 address of a TFTP server to network boot from. \"\"\" return", "TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and", "purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>):", "enable router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self)", "def ipv6_interface_type(self) -> str: \"\"\" Specifies which type of IPv6 connection to use.", "to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\"", "you know what you are doing! 
*** import warnings import pulumi import pulumi.runtime", "name: Optional[str] = None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) ->", "overload from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ]", "@property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix ID. \"\"\"", "= None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\"", "\"\"\" `Network` data source can be used to retrieve settings for a network", "not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\",", "-> str: \"\"\" Specifies the IPV4 WAN connection type. One of either `disabled`,", "`dhcp`, or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str:", "str): raise TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__, \"site\", site) if", "id: The ID of the network. :param str name: The name of the", "def wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN connection type. One of", "a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected", "a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected", "not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\",", "addresses stops. 
\"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\"", "Example Usage ```python import pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device", "\"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a TFTP", "of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def", "a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected", "Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\"", "= unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of the", "was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** # *** Do", "and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__,", "typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id,", "dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None,", "subnet) if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be", "None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None)", "raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable", "'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password,", "return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time for", "be returned from the DHCP server. 
\"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def", "pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix", "argument 'ipv6_ra_enable' to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not", "return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the", "of the network. \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool:", "-> str: \"\"\" The ID of the network. \"\"\" return pulumi.get(self, \"id\") @property", "\"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to", "@pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4 netmask of the WAN. \"\"\"", "where the DHCP range of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\")", "ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None,", "def x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN password. \"\"\" return pulumi.get(self,", "\"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to", "to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str):", "isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet)", "of the network. 
:param str site: The name of the site to associate", "__args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server,", "dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) ->", "be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise", "*** # *** Do not edit by hand unless you're certain you know", "as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param", "Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\"", "Specifies the IPV4 WAN username. \"\"\" return pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self)", "from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type", "dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be", "= None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can", "def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP boot options. 
will be", "@property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled or not", "raise TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose", "@property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a TFTP server", "\"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the", "wan_netmask(self) -> str: \"\"\" The IPv4 netmask of the WAN. \"\"\" return pulumi.get(self,", "ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\")", "WAN. \"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The", "TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and", "wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str]", "igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns,", "@pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which type of IPv6 connection to", "a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise 
TypeError(\"Expected", "str): raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\", name) if", "(tfgen) Tool. *** # *** Do not edit by hand unless you're certain", "-> str: \"\"\" Specifies the WAN network group. One of either `WAN`, `WAN2`", "def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled or not on this", "# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen)", "site: The name of the site to associate the network with. \"\"\" ...", "raise TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled", "'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of values returned by getNetwork.", "WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies", "def name(self) -> str: \"\"\" The name of the network. \"\"\" return pulumi.get(self,", "str: \"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\" return", "ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask,", "on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int:", "to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str):", "str: \"\"\" The subnet of the network (CIDR address). 
\"\"\" return pulumi.get(self, \"subnet\")", "wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None,", "by getNetwork. \"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None,", "pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used to retrieve settings for a", "wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be a str\")", "argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self)", "a TFTP server to network boot from. \"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\")", "pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN", "isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip)", "network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None,", "\"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to", "igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be", "raise TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\")", 
"interface is used for IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\")", "if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a", "def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None,", "range of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) ->", "whether DHCP is enabled or not on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\")", "server to be returned from the DHCP server. \"\"\" return pulumi.get(self, \"dhcp_dns\") @property", "One of either `disabled`, `static`, `dhcp`, or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property", "str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if", "\"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP boot", ":param str name: The name of the network. :param str site: The name", "to associate the network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self)", "argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not", "a str\") pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet, str): raise TypeError(\"Expected", "ID of the network. 
\"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) ->", "raise TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos", "-> str: \"\"\" The IPv4 address where the DHCP range of addresses starts.", "of addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str:", "argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not", "pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ```", "to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id, str):", "a str\") pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site, str): raise TypeError(\"Expected", "if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be a", "raise TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password", "unless you're certain you know what you are doing! 
*** import warnings import", "= [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection", "import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id)", "wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] =", "\"\"\" Toggles on the DHCP boot options. will be set to true if", "a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected", "[ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of", "wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] =", "to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str):", "addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\"", "a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected", "str: \"\"\" The group of the network. 
\"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter", "\"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping is", "Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]:", "lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id:", "opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name,", "and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__,", "Specifies whether to enable router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property", "The VLAN ID of the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def", "wan_ip(self) -> str: \"\"\" The IPv4 address of the WAN. \"\"\" return pulumi.get(self,", "by hand unless you're certain you know what you are doing! 
*** import", "str): raise TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if", "if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be a", "not isinstance(site, str): raise TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__, \"site\",", "id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id,", "\"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the", "\"\"\" Specifies the IPV4 WAN connection type. One of either `disabled`, `static`, `dhcp`,", "\"domain_name\", domain_name) if id and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to", "to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str):", "argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not", "'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway,", "a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected", "dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be", "str\") pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site, str): raise TypeError(\"Expected argument", "wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN username. 
\"\"\" return pulumi.get(self, \"wan_username\")", "site to associate the network with. \"\"\" __args__ = dict() __args__['id'] = id", "\"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to", "dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\"", "name of this network. \"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) ->", "\"name\", name) if network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to", "Sequence[str]: \"\"\" DNS servers IPs of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property", "@property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN ID of the network.", "= None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used to retrieve", "purpose) if site and not isinstance(site, str): raise TypeError(\"Expected argument 'site' to be", "return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\" The name of", "isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping)", "subnet of the network (CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def", "x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be a str\")", "-> str: \"\"\" Specifies the IPV4 WAN password. 
\"\"\" return pulumi.get(self, \"x_wan_password\") class", "None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can be used to retrieve settings", "if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a", "pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\" The purpose of the", "\"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network group.", "IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def", "\"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6 subnet", "and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__,", "the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\"", "be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise", "\"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\" The purpose", "Tool. *** # *** Do not edit by hand unless you're certain you", "int): raise TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if", "str: \"\"\" The domain name of this network. 
\"\"\" return pulumi.get(self, \"domain_name\") @property", "to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list):", "if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be a", "site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if", "int: \"\"\" lease time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\")", "return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN", "@pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4 gateway of the WAN. \"\"\"", "\"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to", "'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of values", "pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable'", "\"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4", "dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable,", "address of the WAN. 
\"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) ->", "not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\",", "stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles", "@pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network group. One of", "dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\")", "str: \"\"\" The purpose of the network. One of `corporate`, `guest`, `wan`, or", "ID of the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) ->", "pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group of the", "wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected", "wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns'", "pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server'", "IPV4 WAN connection type. One of either `disabled`, `static`, `dhcp`, or `pppoe`. \"\"\"", "\"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN password.", "pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url", "where the DHCP range of addresses starts. 
\"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\")", "boot options. will be set to true if you have dhcpd*boot*filename, and dhcpd*boot*server", "\"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False:", "dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be a str\")", "-> str: \"\"\" Specifies which type of IPv6 connection to use. \"\"\" return", "site to associate the network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def", "igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns,", "isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username)", "\"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the file", "not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\",", "wan_ip) if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be", "ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface is used for IPv6 Prefix", "opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( 
dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled,", "\"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether", "the network (CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) ->", "network_group) if purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose' to be", "dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable,", "or ID. ## Example Usage ```python import pulumi import pulumi_unifi as unifi lan_network", "isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled)", "\"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the", "raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway", "wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be", "the site to associate the network with. \"\"\" __args__ = dict() __args__['id'] =", "'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename,", "from. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The", "*** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool.", "@property @pulumi.getter def purpose(self) -> str: \"\"\" The purpose of the network. One", "argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not", "site) if subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be", "dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\")", "of the network. \"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str:", "-> str: \"\"\" The name of the network. \"\"\" return pulumi.get(self, \"name\") @property", "be a str\") pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site, str): raise", "@property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time for DHCP addresses. \"\"\"", "Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions]", "Specifies the IPV4 WAN connection type. One of either `disabled`, `static`, `dhcp`, or", "pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type'", "pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the file to PXE", "wan_gateway(self) -> str: \"\"\" The IPv4 gateway of the WAN. \"\"\" return pulumi.get(self,", "return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4 address", "The name of the network. 
\"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self)", "ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None,", "used for IPv6 Prefix Delegation. \"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self)", "IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool:", "not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\",", "\"\"\" Specifies the IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): #", "\"\"\" Specifies whether to enable router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\")", "or not on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self)", "wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str] = None, site:", "argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not", "not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies", "is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult(", "of the WAN. 
\"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str:", "whether to enable router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\")", "-> str: \"\"\" The group of the network. \"\"\" return pulumi.get(self, \"network_group\") @property", "TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and", "argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not", "certain you know what you are doing! *** import warnings import pulumi import", "pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos'", "\"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to", "def id(self) -> str: \"\"\" The ID of the network. 
\"\"\" return pulumi.get(self,", "str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument", "Optional[str] = None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult:", "unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str", "\"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4 address where the", "pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name'", "str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument", "TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and", "site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def", "raise TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username", "name) if network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be", "'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) ->", "be a str\") pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group, str): raise", "pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def 
ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether to enable", "dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the DNS server to be returned", "IPv4 address where the DHCP range of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\")", "return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of", "if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be a", "@pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4 address where the DHCP range", "x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\")", "purpose(self) -> str: \"\"\" The purpose of the network. One of `corporate`, `guest`,", "def site(self) -> str: \"\"\" The name of the site to associate the", "a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str): raise TypeError(\"Expected", "not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\",", "name of the network. :param str site: The name of the site to", "and dhcpd*boot*server set. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str:", "ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway,", "return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which type", "x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the DNS", "\"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\" The name of the network.", "raise TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id", "def dhcpd_boot_filename(self) -> str: \"\"\" the file to PXE boot from on the", "servers IPs of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self)", "TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and", "network. 
\"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies", "= site if opts is None: opts = pulumi.InvokeOptions() if opts.version is None:", "@property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4 netmask of the WAN.", "not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\",", "return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the IPV4", "pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled'", "@property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies which type of IPv6 connection", "connection type. One of either `disabled`, `static`, `dhcp`, or `pppoe`. \"\"\" return pulumi.get(self,", "__init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None,", "not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\",", "return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN", "-> str: \"\"\" Specifies the IPV4 WAN username. \"\"\" return pulumi.get(self, \"wan_username\") @property", "\"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4 netmask of the", "the network. One of `corporate`, `guest`, `wan`, or `vlan-only`. 
\"\"\" return pulumi.get(self, \"purpose\")", "\"\"\" __args__ = dict() __args__['id'] = id __args__['name'] = name __args__['site'] = site", "argument 'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not", "name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type,", "of the site to associate the network with. \"\"\" return pulumi.get(self, \"site\") @property", "ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be", "@pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP boot options. will", "\"\"\" Specifies whether IGMP snooping is enabled or not. 
\"\"\" return pulumi.get(self, \"igmp_snooping\")", "be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise", "site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data", "TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and", "int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument", "be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if wan_username and not isinstance(wan_username, str): raise", "not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\",", "Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source", "a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected", "a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id, str): raise TypeError(\"Expected", "\"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to", "'wan_gateway' to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip,", "\"\"\" Specifies which type of IPv6 connection to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\")", "wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be a list\")", "isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns)", "str site: The name of the site to associate the network with. 
\"\"\"", "network. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The", "not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\",", "DHCP range of addresses starts. \"\"\" return pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self)", "domain_name) if id and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be", "domain_name(self) -> str: \"\"\" The domain name of this network. \"\"\" return pulumi.get(self,", "\"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to", "dict() __args__['id'] = id __args__['name'] = name __args__['site'] = site if opts is", "dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose,", "pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start'", "can be used to retrieve settings for a network by name or ID.", "The group of the network. 
\"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self)", "isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns)", "str): raise TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose) if", "opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url()", "str: \"\"\" IPv4 address of a TFTP server to network boot from. \"\"\"", "wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be", "\"\"\" Specifies the WAN egress quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property", "to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str):", "be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int): raise", "'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup,", "id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id,", "of this network. \"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str:", "network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\")", "will be set to true if you have dhcpd*boot*filename, and dhcpd*boot*server set. 
\"\"\"", "typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__", "list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument", "dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be", "= None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] =", "raise TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__, \"site\", site) if subnet", "bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument", "def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None,", "this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** #", "return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping,", "\"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to", "dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be a int\")", "argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not", "or not. 
\"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\"", "if name and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a", "argument 'site' to be a str\") pulumi.set(__self__, \"site\", site) if subnet and not", "of the network. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str:", "str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument", "@pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies the IPV4 WAN username. \"\"\" return", "if network_group and not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be a", "TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\", name) if network_group and", "DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\"", "ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be", "str): raise TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if", "return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN", "or `vlan-only`. 
\"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\"", "\"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address", "@pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress quality of service.", "pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet'", "Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source", "ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup,", "or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\"", "(when ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) ->", "str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if", "\"\"\" The name of the site to associate the network with. 
\"\"\" return", "\"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to", "TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and", "wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name: Optional[str] = None,", "-> int: \"\"\" Specifies the WAN egress quality of service. \"\"\" return pulumi.get(self,", "\"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP", "None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used to retrieve settings", "IPv4 address of a TFTP server to network boot from. \"\"\" return pulumi.get(self,", "\"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to", "ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\")", "'site' to be a str\") pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet,", "pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6", "isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\", id)", "of service. 
\"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\"", "pulumi.set(__self__, \"network_group\", network_group) if purpose and not isinstance(purpose, str): raise TypeError(\"Expected argument 'purpose'", "\"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self,", "str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument", "\"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time for DHCP addresses.", "and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__,", "and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__,", "str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument", "= dict() __args__['id'] = id __args__['name'] = name __args__['site'] = site if opts", "if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be a", "if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url =", "and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__,", "name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None,", "not edit by hand unless you're certain you know what you are doing!", "and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__,", "name 
__args__['site'] = site if opts is None: opts = pulumi.InvokeOptions() if opts.version", "bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if", "\"x_wan_password\", x_wan_password) @property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the", "isinstance(site, str): raise TypeError(\"Expected argument 'site' to be a str\") pulumi.set(__self__, \"site\", site)", "str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str): raise TypeError(\"Expected argument", "this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\"", "my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The ID of", "str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if", "\"\"\" The IPv4 netmask of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\")", "`vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\" The", "= id __args__['name'] = name __args__['site'] = site if opts is None: opts", "isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type)", "@pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time for DHCP addresses. \"\"\" return", "@property @pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group of the network. 
\"\"\"", "\"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\" The subnet", "addresses for the DNS server to be returned from the DHCP server. \"\"\"", "pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease'", "the network. \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\"", "dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid,", "\"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain", "bool: \"\"\" Specifies whether to enable router advertisements or not. \"\"\" return pulumi.get(self,", "DNS server to be returned from the DHCP server. \"\"\" return pulumi.get(self, \"dhcp_dns\")", "GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type,", "\"\"\" The IPv4 address of the WAN. 
\"\"\" return pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\")", "wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be a str\")", "def get_network(id: Optional[str] = None, name: Optional[str] = None, site: Optional[str] = None,", "isinstance(dhcpd_boot_server, str): raise TypeError(\"Expected argument 'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server)", "@pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN ID of the network. \"\"\"", "= name __args__['site'] = site if opts is None: opts = pulumi.InvokeOptions() if", "str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument", "dhcp_lease(self) -> int: \"\"\" lease time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\")", "lease time for DHCP addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self)", "opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can be used", "network. One of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return pulumi.get(self, \"purpose\") @property", "str: \"\"\" the file to PXE boot from on the dhcpd*boot*server. \"\"\" return", "str): raise TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if", "def network_group(self) -> str: \"\"\" The group of the network. \"\"\" return pulumi.get(self,", "to true if you have dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\")", "not isinstance(dhcp_stop, str): raise TypeError(\"Expected argument 'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\",", "connection to use. 
\"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str:", "isinstance(dhcp_enabled, bool): raise TypeError(\"Expected argument 'dhcp_enabled' to be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled)", "be a bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise", "isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\", name)", "\"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4", "values returned by getNetwork. \"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None,", "str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id, str): raise TypeError(\"Expected argument", "returned from the DHCP server. \"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self)", "is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() if", "if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a", "address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The", "is used for IPv6 Prefix Delegation. 
\"\"\" return pulumi.get(self, \"ipv6_pd_interface\") @property @pulumi.getter(name=\"ipv6PdPrefixid\") def", "str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if", "a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected", "str: \"\"\" Specifies which WAN interface is used for IPv6 Prefix Delegation. \"\"\"", "wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str] = None, name:", "use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies", "name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type,", "def domain_name(self) -> str: \"\"\" The domain name of this network. \"\"\" return", "network boot from. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str:", "disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start,", "and not isinstance(ipv6_interface_type, str): raise TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__,", "Sequence[str]: \"\"\" IPv4 addresses for the DNS server to be returned from the", "network with. \"\"\" __args__ = dict() __args__['id'] = id __args__['name'] = name __args__['site']", "be a str\") pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet, str): raise", "'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id,", "return pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain name", "\"\"\" The IPv4 gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\")", "wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns,", "str: \"\"\" Specifies the IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult):", "is enabled or not on this network. \"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\")", "not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\",", "WAN. 
\"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The", "TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\") pulumi.set(__self__, \"ipv6_pd_prefixid\", ipv6_pd_prefixid) if ipv6_ra_enable and", "of the network. :param str name: The name of the network. :param str", "def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a TFTP server to network", "= _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease,", "\"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4 address of the", "argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not", "opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork',", "\"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN ID of the", "@property @pulumi.getter def id(self) -> str: \"\"\" The ID of the network. \"\"\"", "pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a", "a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected", "@pulumi.getter(name=\"ipv6PdPrefixid\") def ipv6_pd_prefixid(self) -> str: \"\"\" Specifies the IPv6 Prefix ID. 
\"\"\" return", "and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__,", "@property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6 subnet (when", "\"\"\" whether DHCP is enabled or not on this network. \"\"\" return pulumi.get(self,", "addresses. \"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The", "to be a str\") pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site, str):", "argument 'name' to be a str\") pulumi.set(__self__, \"name\", name) if network_group and not", ". import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class", "pulumi.set(__self__, \"site\", site) if subnet and not isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet'", "Specifies the static IPv6 subnet (when ipv6*interface*type is 'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\")", "@pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN connection type. One", "and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be a str\") pulumi.set(__self__,", "pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password'", "self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id,", "-> str: \"\"\" the file to PXE boot from on the dhcpd*boot*server. 
\"\"\"", "\"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time", "if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to be a", "Specifies the IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint:", "-> Sequence[str]: \"\"\" IPv4 addresses for the DNS server to be returned from", "str): raise TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if", "file to PXE boot from on the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property", "not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\",", "\"\"\" IPv4 addresses for the DNS server to be returned from the DHCP", "if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected argument 'dhcpd_boot_filename' to be a", "\"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\" The VLAN", "False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server,", "# *** Do not edit by hand unless you're certain you know what", "returned by getNetwork. 
\"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None,", "ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos,", "DHCP range of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self)", "a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str): raise TypeError(\"Expected", "for a network by name or ID. ## Example Usage ```python import pulumi", "TypeError(\"Expected argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and", "@pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled or not on", "\"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) -> bool: \"\"\" Specifies whether", "def wan_networkgroup(self) -> str: \"\"\" Specifies the WAN network group. One of either", "pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN", "argument 'id' to be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not", "The ID of the network. 
:param str name: The name of the network.", "pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress", "def wan_gateway(self) -> str: \"\"\" The IPv4 gateway of the WAN. \"\"\" return", "dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise TypeError(\"Expected argument 'domain_name' to be", "id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None,", "pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def dhcp_lease(self) -> int: \"\"\" lease time for DHCP", "str: \"\"\" Specifies which type of IPv6 connection to use. \"\"\" return pulumi.get(self,", "def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static IPv6 subnet (when ipv6*interface*type is", "have dhcpd*boot*filename, and dhcpd*boot*server set. \"\"\" return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self)", "argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\", network_group) if purpose and not", "argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not", "network. :param str site: The name of the site to associate the network", "purpose of the network. One of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\" return", "the DHCP server. \"\"\" return pulumi.get(self, \"dhcp_dns\") @property @pulumi.getter(name=\"dhcpEnabled\") def dhcp_enabled(self) -> bool:", "IPv4 addresses for the DNS server to be returned from the DHCP server.", "wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of the WAN. 
\"\"\" return pulumi.get(self,", "\"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4", "if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a", "AwaitableGetNetworkResult: \"\"\" `Network` data source can be used to retrieve settings for a", "of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str:", "\"\"\" The ID of the network. \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\") def", "not. \"\"\" return pulumi.get(self, \"igmp_snooping\") @property @pulumi.getter(name=\"ipv6InterfaceType\") def ipv6_interface_type(self) -> str: \"\"\" Specifies", "TypeError(\"Expected argument 'wan_egress_qos' to be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and", "service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The", "return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\" The ID of", "return pulumi.get(self, \"wan_gateway\") @property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4 address", "be a int\") pulumi.set(__self__, \"wan_egress_qos\", wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise", "`disabled`, `static`, `dhcp`, or `pppoe`. \"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self)", "not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\",", "type of IPv6 connection to use. \"\"\" return pulumi.get(self, \"ipv6_interface_type\") @property @pulumi.getter(name=\"ipv6PdInterface\") def", "edit by hand unless you're certain you know what you are doing! 
***", "isinstance(wan_networkgroup, str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup)", "name or ID. ## Example Usage ```python import pulumi import pulumi_unifi as unifi", "str): raise TypeError(\"Expected argument 'wan_networkgroup' to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if", "raise TypeError(\"Expected argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid", "the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self) -> str: \"\"\"", "return pulumi.get(self, \"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\" The subnet of", "WAN network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self,", "'dhcpd_boot_server' to be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name,", "the file to PXE boot from on the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\")", "TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose) if site and", "(CIDR address). \"\"\" return pulumi.get(self, \"subnet\") @property @pulumi.getter(name=\"vlanId\") def vlan_id(self) -> int: \"\"\"", "'static'). \"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self) -> str: \"\"\" The", "pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns'", "site and not isinstance(site, str): raise TypeError(\"Expected argument 'site' to be a str\")", "be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str): raise", "dhcpd*boot*server. 
\"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4", "for the DNS server to be returned from the DHCP server. \"\"\" return", "@pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a TFTP server to", "vlan_id) if wan_dns and not isinstance(wan_dns, list): raise TypeError(\"Expected argument 'wan_dns' to be", "pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled'", "wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip' to be", "IPs of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) ->", "if wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be a", "collection of values returned by getNetwork. \"\"\" def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None,", "to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not isinstance(wan_egress_qos, int):", "ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip,", "a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise TypeError(\"Expected", "argument 'wan_dns' to be a list\") pulumi.set(__self__, \"wan_dns\", wan_dns) if wan_egress_qos and not", "Specifies which WAN interface is used for IPv6 Prefix Delegation. 
\"\"\" return pulumi.get(self,", "if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a", "pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4 gateway of", "wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None,", "dhcp_enabled(self) -> bool: \"\"\" whether DHCP is enabled or not on this network.", "router advertisements or not. \"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) ->", "be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and not isinstance(wan_networkgroup, str): raise", "of a TFTP server to network boot from. \"\"\" return pulumi.get(self, \"dhcpd_boot_server\") @property", "import pulumi import pulumi_unifi as unifi lan_network = unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network", "return pulumi.get(self, \"dhcpd_boot_enabled\") @property @pulumi.getter(name=\"dhcpdBootFilename\") def dhcpd_boot_filename(self) -> str: \"\"\" the file to", "Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = [", "-> Sequence[str]: \"\"\" DNS servers IPs of the WAN. \"\"\" return pulumi.get(self, \"wan_dns\")", "not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\",", "] @pulumi.output_type class GetNetworkResult: \"\"\" A collection of values returned by getNetwork. 
\"\"\"", "to be a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str):", "wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] =", "pulumi.get(self, \"dhcp_start\") @property @pulumi.getter(name=\"dhcpStop\") def dhcp_stop(self) -> str: \"\"\" The IPv4 address where", "domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet,", "import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', 'get_network_output', ] @pulumi.output_type class GetNetworkResult:", "TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and", "opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used", "options. will be set to true if you have dhcpd*boot*filename, and dhcpd*boot*server set.", "vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username, x_wan_password=self.x_wan_password) def get_network(id: Optional[str]", "wan_egress_qos and not isinstance(wan_egress_qos, int): raise TypeError(\"Expected argument 'wan_egress_qos' to be a int\")", "advertisements or not. 
\"\"\" return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str:", "be a str\") pulumi.set(__self__, \"dhcpd_boot_server\", dhcpd_boot_server) if domain_name and not isinstance(domain_name, str): raise", "pulumi.set(__self__, \"domain_name\", domain_name) if id and not isinstance(id, str): raise TypeError(\"Expected argument 'id'", "this network. \"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\"", "return pulumi.get(self, \"network_group\") @property @pulumi.getter def purpose(self) -> str: \"\"\" The purpose of", "= _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__,", "wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress quality of service. \"\"\" return", "warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union,", "to be a bool\") pulumi.set(__self__, \"ipv6_ra_enable\", ipv6_ra_enable) if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str):", "network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username,", "ipv6_static_subnet and not isinstance(ipv6_static_subnet, str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\")", "@pulumi.getter(name=\"ipv6PdInterface\") def ipv6_pd_interface(self) -> str: \"\"\" Specifies which WAN interface is used for", "pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . 
import", "-> bool: \"\"\" whether DHCP is enabled or not on this network. \"\"\"", "raise TypeError(\"Expected argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns", "raise TypeError(\"Expected argument 'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask", "dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface, ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site,", "def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress quality of service. \"\"\"", "yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name,", "raise TypeError(\"Expected argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose) if site", "to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool):", "vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id' to be a int\")", "a str\") pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected", "_utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts,", "site: The name of the site to associate the network with. 
\"\"\" __args__", "\"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) -> str: \"\"\" The IPv4 gateway of the", "igmp_snooping and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\")", "str): raise TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if", "str: \"\"\" The IPv4 gateway of the WAN. \"\"\" return pulumi.get(self, \"wan_gateway\") @property", "used to retrieve settings for a network by name or ID. ## Example", "@pulumi.getter(name=\"igmpSnooping\") def igmp_snooping(self) -> bool: \"\"\" Specifies whether IGMP snooping is enabled or", "'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose) if site and not isinstance(site,", "the DHCP range of addresses stops. \"\"\" return pulumi.get(self, \"dhcp_stop\") @property @pulumi.getter(name=\"dhcpdBootEnabled\") def", "= None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can", "The IPv4 address where the DHCP range of addresses starts. 
\"\"\" return pulumi.get(self,", "@property @pulumi.getter(name=\"dhcpDns\") def dhcp_dns(self) -> Sequence[str]: \"\"\" IPv4 addresses for the DNS server", "bool): raise TypeError(\"Expected argument 'dhcpd_boot_enabled' to be a bool\") pulumi.set(__self__, \"dhcpd_boot_enabled\", dhcpd_boot_enabled) if", "= None) -> AwaitableGetNetworkResult: \"\"\" `Network` data source can be used to retrieve", "get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, site: Optional[pulumi.Input[Optional[str]]] = None, opts:", "\"\"\" return pulumi.get(self, \"domain_name\") @property @pulumi.getter def id(self) -> str: \"\"\" The ID", "to be a str\") pulumi.set(__self__, \"wan_networkgroup\", wan_networkgroup) if wan_type and not isinstance(wan_type, str):", "@property @pulumi.getter(name=\"dhcpdBootEnabled\") def dhcpd_boot_enabled(self) -> bool: \"\"\" Toggles on the DHCP boot options.", "pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from", "wan_type) if wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be", "subnet (when ipv6*interface*type is 'static'). 
\"\"\" return pulumi.get(self, \"ipv6_static_subnet\") @property @pulumi.getter def name(self)", "argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop and not", "str): raise TypeError(\"Expected argument 'wan_type' to be a str\") pulumi.set(__self__, \"wan_type\", wan_type) if", "argument 'ipv6_pd_interface' to be a str\") pulumi.set(__self__, \"ipv6_pd_interface\", ipv6_pd_interface) if ipv6_pd_prefixid and not", "'dhcp_dns' to be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled,", "return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the IPV4", "__await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled,", "to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start and not isinstance(dhcp_start, str):", "wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN connection type. One of either", "def subnet(self) -> str: \"\"\" The subnet of the network (CIDR address). \"\"\"", "if wan_type and not isinstance(wan_type, str): raise TypeError(\"Expected argument 'wan_type' to be a", "Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]: \"\"\" `Network` data source can be used to", "address where the DHCP range of addresses stops. 
\"\"\" return pulumi.get(self, \"dhcp_stop\") @property", "pulumi.set(__self__, \"wan_gateway\", wan_gateway) if wan_ip and not isinstance(wan_ip, str): raise TypeError(\"Expected argument 'wan_ip'", "str): raise TypeError(\"Expected argument 'ipv6_static_subnet' to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if", "# pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled,", "unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The ID", "def __await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop,", "to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str):", "the IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def ipv6_ra_enable(self) ->", "the DNS server to be returned from the DHCP server. \"\"\" return pulumi.get(self,", "pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument 'vlan_id'", "wan_egress_qos) if wan_gateway and not isinstance(wan_gateway, str): raise TypeError(\"Expected argument 'wan_gateway' to be", "= None, name: Optional[str] = None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] =", "None, site: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: \"\"\" `Network`", "`pppoe`. 
\"\"\" return pulumi.get(self, \"wan_type\") @property @pulumi.getter(name=\"wanUsername\") def wan_username(self) -> str: \"\"\" Specifies", "None: opts.plugin_download_url = _utilities.get_plugin_download_url() __ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns,", "not isinstance(network_group, str): raise TypeError(\"Expected argument 'network_group' to be a str\") pulumi.set(__self__, \"network_group\",", "wan_netmask and not isinstance(wan_netmask, str): raise TypeError(\"Expected argument 'wan_netmask' to be a str\")", "\"site\") @property @pulumi.getter def subnet(self) -> str: \"\"\" The subnet of the network", "dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be a str\")", "wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password) @_utilities.lift_output_func(get_network) def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None,", "id and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a str\")", "str): raise TypeError(\"Expected argument 'x_wan_password' to be a str\") pulumi.set(__self__, \"x_wan_password\", x_wan_password) @property", "`WAN`, `WAN2` or `WAN_LTE_FAILOVER`. \"\"\" return pulumi.get(self, \"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) ->", "Do not edit by hand unless you're certain you know what you are", "the WAN egress quality of service. 
\"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def", "bool\") pulumi.set(__self__, \"dhcp_enabled\", dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument", "@pulumi.getter def subnet(self) -> str: \"\"\" The subnet of the network (CIDR address).", "be a list\") pulumi.set(__self__, \"dhcp_dns\", dhcp_dns) if dhcp_enabled and not isinstance(dhcp_enabled, bool): raise", "are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any,", "WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***", "-> str: \"\"\" Specifies the IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property", "'igmp_snooping' to be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type,", "\"\"\" The group of the network. \"\"\" return pulumi.get(self, \"network_group\") @property @pulumi.getter def", "\"\"\" return pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\" The name", "id(self) -> str: \"\"\" The ID of the network. \"\"\" return pulumi.get(self, \"id\")", "network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS", "network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip, wan_netmask=self.wan_netmask, wan_networkgroup=self.wan_networkgroup, wan_type=self.wan_type, wan_username=self.wan_username,", "and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be a str\") pulumi.set(__self__,", "``` :param str id: The ID of the network. :param str name: The", "\"\"\" DNS servers IPs of the WAN. 
\"\"\" return pulumi.get(self, \"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\")", "IPv4 netmask of the WAN. \"\"\" return pulumi.get(self, \"wan_netmask\") @property @pulumi.getter(name=\"wanNetworkgroup\") def wan_networkgroup(self)", "the DHCP boot options. will be set to true if you have dhcpd*boot*filename,", "the IPV4 WAN connection type. One of either `disabled`, `static`, `dhcp`, or `pppoe`.", "x_wan_password=<PASSWORD>): if dhcp_dns and not isinstance(dhcp_dns, list): raise TypeError(\"Expected argument 'dhcp_dns' to be", "str\") pulumi.set(__self__, \"subnet\", subnet) if vlan_id and not isinstance(vlan_id, int): raise TypeError(\"Expected argument", "'id' to be a str\") pulumi.set(__self__, \"id\", id) if igmp_snooping and not isinstance(igmp_snooping,", "bool: \"\"\" Toggles on the DHCP boot options. will be set to true", "egress quality of service. \"\"\" return pulumi.get(self, \"wan_egress_qos\") @property @pulumi.getter(name=\"wanGateway\") def wan_gateway(self) ->", "AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult( dhcp_dns=self.dhcp_dns,", "be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not isinstance(wan_dns, list): raise", "raise TypeError(\"Expected argument 'dhcp_lease' to be a int\") pulumi.set(__self__, \"dhcp_lease\", dhcp_lease) if dhcp_start", "'dhcp_stop' to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled,", "str: \"\"\" Specifies the IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\")", "The purpose of the network. One of `corporate`, `guest`, `wan`, or `vlan-only`. \"\"\"", "and not isinstance(igmp_snooping, bool): raise TypeError(\"Expected argument 'igmp_snooping' to be a bool\") pulumi.set(__self__,", "type. One of either `disabled`, `static`, `dhcp`, or `pppoe`. 
\"\"\" return pulumi.get(self, \"wan_type\")", "dhcp_enabled) if dhcp_lease and not isinstance(dhcp_lease, int): raise TypeError(\"Expected argument 'dhcp_lease' to be", "dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled, dhcpd_boot_filename=__ret__.dhcpd_boot_filename, dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group,", "@property @pulumi.getter(name=\"wanIp\") def wan_ip(self) -> str: \"\"\" The IPv4 address of the WAN.", "TypeError(\"Expected argument 'wan_netmask' to be a str\") pulumi.set(__self__, \"wan_netmask\", wan_netmask) if wan_networkgroup and", "purpose=__ret__.purpose, site=__ret__.site, subnet=__ret__.subnet, vlan_id=__ret__.vlan_id, wan_dns=__ret__.wan_dns, wan_egress_qos=__ret__.wan_egress_qos, wan_gateway=__ret__.wan_gateway, wan_ip=__ret__.wan_ip, wan_netmask=__ret__.wan_netmask, wan_networkgroup=__ret__.wan_networkgroup, wan_type=__ret__.wan_type, wan_username=__ret__.wan_username, x_wan_password=__ret__.x_wan_password)", "be a bool\") pulumi.set(__self__, \"igmp_snooping\", igmp_snooping) if ipv6_interface_type and not isinstance(ipv6_interface_type, str): raise", "on the dhcpd*boot*server. \"\"\" return pulumi.get(self, \"dhcpd_boot_filename\") @property @pulumi.getter(name=\"dhcpdBootServer\") def dhcpd_boot_server(self) -> str:", "raise TypeError(\"Expected argument 'dhcp_start' to be a str\") pulumi.set(__self__, \"dhcp_start\", dhcp_start) if dhcp_stop", "argument 'purpose' to be a str\") pulumi.set(__self__, \"purpose\", purpose) if site and not", "password. 
\"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if", "to be a str\") pulumi.set(__self__, \"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str):", "ID. ## Example Usage ```python import pulumi import pulumi_unifi as unifi lan_network =", "to be a str\") pulumi.set(__self__, \"dhcp_stop\", dhcp_stop) if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool):", "str: \"\"\" The ID of the network. \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"igmpSnooping\")", "\"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\" The name of the site", "what you are doing! *** import warnings import pulumi import pulumi.runtime from typing", "return pulumi.get(self, \"ipv6_ra_enable\") @property @pulumi.getter(name=\"ipv6StaticSubnet\") def ipv6_static_subnet(self) -> str: \"\"\" Specifies the static", "to be a str\") pulumi.set(__self__, \"ipv6_static_subnet\", ipv6_static_subnet) if name and not isinstance(name, str):", "'name' to be a str\") pulumi.set(__self__, \"name\", name) if network_group and not isinstance(network_group,", "def dhcp_stop(self) -> str: \"\"\" The IPv4 address where the DHCP range of", "pulumi.get(self, \"dhcpd_boot_server\") @property @pulumi.getter(name=\"domainName\") def domain_name(self) -> str: \"\"\" The domain name of", "pulumi.get(self, \"purpose\") @property @pulumi.getter def site(self) -> str: \"\"\" The name of the", "ipv6_pd_prefixid=self.ipv6_pd_prefixid, ipv6_ra_enable=self.ipv6_ra_enable, ipv6_static_subnet=self.ipv6_static_subnet, name=self.name, network_group=self.network_group, purpose=self.purpose, site=self.site, subnet=self.subnet, vlan_id=self.vlan_id, wan_dns=self.wan_dns, wan_egress_qos=self.wan_egress_qos, wan_gateway=self.wan_gateway, wan_ip=self.wan_ip,", "dhcpd_boot_server(self) -> str: \"\"\" IPv4 address of a TFTP server to network 
boot", "argument 'vlan_id' to be a int\") pulumi.set(__self__, \"vlan_id\", vlan_id) if wan_dns and not", "dhcpd_boot_server=__ret__.dhcpd_boot_server, domain_name=__ret__.domain_name, id=__ret__.id, igmp_snooping=__ret__.igmp_snooping, ipv6_interface_type=__ret__.ipv6_interface_type, ipv6_pd_interface=__ret__.ipv6_pd_interface, ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid, ipv6_ra_enable=__ret__.ipv6_ra_enable, ipv6_static_subnet=__ret__.ipv6_static_subnet, name=__ret__.name, network_group=__ret__.network_group, purpose=__ret__.purpose, site=__ret__.site,", "= pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( dhcp_dns=__ret__.dhcp_dns, dhcp_enabled=__ret__.dhcp_enabled, dhcp_lease=__ret__.dhcp_lease, dhcp_start=__ret__.dhcp_start, dhcp_stop=__ret__.dhcp_stop, dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled,", "'dhcpd_boot_filename' to be a str\") pulumi.set(__self__, \"dhcpd_boot_filename\", dhcpd_boot_filename) if dhcpd_boot_server and not isinstance(dhcpd_boot_server,", "ipv6_pd_prefixid) if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool): raise TypeError(\"Expected argument 'ipv6_ra_enable' to be", "return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield", "@pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN password. \"\"\" return", "'wan_ip' to be a str\") pulumi.set(__self__, \"wan_ip\", wan_ip) if wan_netmask and not isinstance(wan_netmask,", "-> str: \"\"\" IPv4 address of a TFTP server to network boot from.", "pulumi.get(self, \"wan_username\") @property @pulumi.getter(name=\"xWanPassword\") def x_wan_password(self) -> str: \"\"\" Specifies the IPV4 WAN", "int: \"\"\" The VLAN ID of the network. 
\"\"\" return pulumi.get(self, \"vlan_id\") @property", "\"\"\" the file to PXE boot from on the dhcpd*boot*server. \"\"\" return pulumi.get(self,", "dhcp_dns=self.dhcp_dns, dhcp_enabled=self.dhcp_enabled, dhcp_lease=self.dhcp_lease, dhcp_start=self.dhcp_start, dhcp_stop=self.dhcp_stop, dhcpd_boot_enabled=self.dhcpd_boot_enabled, dhcpd_boot_filename=self.dhcpd_boot_filename, dhcpd_boot_server=self.dhcpd_boot_server, domain_name=self.domain_name, id=self.id, igmp_snooping=self.igmp_snooping, ipv6_interface_type=self.ipv6_interface_type, ipv6_pd_interface=self.ipv6_pd_interface,", "\"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\" DNS servers IPs of the", "\"\"\" return pulumi.get(self, \"dhcp_lease\") @property @pulumi.getter(name=\"dhcpStart\") def dhcp_start(self) -> str: \"\"\" The IPv4", "pulumi.get(self, \"wan_ip\") @property @pulumi.getter(name=\"wanNetmask\") def wan_netmask(self) -> str: \"\"\" The IPv4 netmask of", "TypeError(\"Expected argument 'domain_name' to be a str\") pulumi.set(__self__, \"domain_name\", domain_name) if id and", "str: \"\"\" The IPv4 address where the DHCP range of addresses stops. \"\"\"", "@pulumi.getter(name=\"networkGroup\") def network_group(self) -> str: \"\"\" The group of the network. 
\"\"\" return", "TypeError(\"Expected argument 'ipv6_interface_type' to be a str\") pulumi.set(__self__, \"ipv6_interface_type\", ipv6_interface_type) if ipv6_pd_interface and", "= unifi.get_network(name=\"LAN\") my_device = unifi.get_user(mac=\"01:23:45:67:89:ab\") my_network = unifi.get_network(id=my_device.network_id) ``` :param str id: The", "ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str): raise TypeError(\"Expected argument 'ipv6_pd_prefixid' to be a str\")", "\"wan_dns\") @property @pulumi.getter(name=\"wanEgressQos\") def wan_egress_qos(self) -> int: \"\"\" Specifies the WAN egress quality", "import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload", "isinstance(subnet, str): raise TypeError(\"Expected argument 'subnet' to be a str\") pulumi.set(__self__, \"subnet\", subnet)", "the network. \"\"\" return pulumi.get(self, \"vlan_id\") @property @pulumi.getter(name=\"wanDns\") def wan_dns(self) -> Sequence[str]: \"\"\"", "wan_username and not isinstance(wan_username, str): raise TypeError(\"Expected argument 'wan_username' to be a str\")", "\"\"\" Specifies the IPv6 Prefix ID. \"\"\" return pulumi.get(self, \"ipv6_pd_prefixid\") @property @pulumi.getter(name=\"ipv6RaEnable\") def", "the IPV4 WAN password. \"\"\" return pulumi.get(self, \"x_wan_password\") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test", "the site to associate the network with. \"\"\" return pulumi.get(self, \"site\") @property @pulumi.getter", "\"wan_networkgroup\") @property @pulumi.getter(name=\"wanType\") def wan_type(self) -> str: \"\"\" Specifies the IPV4 WAN connection", "enabled or not on this network. 
\"\"\" return pulumi.get(self, \"dhcp_enabled\") @property @pulumi.getter(name=\"dhcpLease\") def", "-> str: \"\"\" Specifies which WAN interface is used for IPv6 Prefix Delegation.", "if dhcp_start and not isinstance(dhcp_start, str): raise TypeError(\"Expected argument 'dhcp_start' to be a", "\"wan_username\", wan_username) if x_wan_password and not isinstance(x_wan_password, str): raise TypeError(\"Expected argument 'x_wan_password' to" ]
[ "1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0,", "c[i,:]) # 0 1 2 3 4 5 6 7 8 9 10", "5, 6, 7, 8, 10, 10, 11] for i in range(n): ii =", "abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in range(30,50): s = []", "= int(iid[i]) j = itoj[ii] c[i, 11] = j c[i, 12] = masss[j]", "the dexription file for i, line in enumerate(lines): ls = line.split() global_id =", "γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm => m, bar =>", "=> pascal γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm => m,", "> 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id,", "timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto", "=> pascal γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm => m,", "1.0 or np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT", "np.array(p1 + p2) * 1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T =", "n0-2): break j = min(j, n0-2) v = (x[i] - x0[j]) / (x0[j+1]", "True, False) index = index1*index2 λ = λ[index] iid = iid[index] S =", "the CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code Abundance", "Arguments: x0 {numpy float} -- [description] y0 {numpy float} -- [description] n {int}", "(full range) gi 7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments:", "= m[index, 5] γ_s = m[index, 6] ν_l = m[index, 7] n_a =", "11] ν_l = ν_l / 1.0e-2 # cm => m, bar => pascal", "lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin, lmax,", "abus, masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file +", "data onto a equidistant grid with n grid points Arguments: x0 {numpy float}", "5 6 7 8 9 10 11 # 1 2 3 4 5", "'r') as ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c", "x0 = np.array(x0) y0 = np.array(y0) n0 = 
x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y", "cm => m, bar => pascal ΔE_ul = h * c0 / λ", "# read the partition function files for path in paths: with open(os.path.join(CO2_Q_dir, path),", "ν_l = ν_l / 1.0e-2 # cm => m, bar => pascal γ_a", "os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] = T_T p[:,0] = h_p p[:,1] =", "=> m, bar => pascal ΔE_ul = h * c0 / λ E_l", "pascal ΔE_ul = h * c0 / λ E_l = h * c0", "2 3 4 5 6 7 8 9 0 11 12 itoj =", "9] = g_u c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) #", "[description] y0 {numpy float} -- [description] n {int} -- [description] Returns: (numpy, numpy)", "n = T[0].shape[0] m = len(T) TQ = np.zeros((n, m + 1), np.double)", "-- [description] \"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,", "\"\"\"Create lokup tables for the CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID", "T,Q pairs Returns: T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as", "m[index, 10] g_l = m[index, 11] ν_l = ν_l / 1.0e-2 # cm", "np.zeros((n, 14)) c[:, 0] = λ c[:, 1] = E_l c[:, 2] =", "17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0]", "= h * c0 / λ E_l = h * c0 * ν_l", "x[i] <= x0[j+1]): j += 1 if (j > n0-2): break j =", "(numpy, numpy) -- [description] \"\"\" x0 = np.array(x0) y0 = np.array(y0) n0 =", "10, 11] for i in range(n): ii = int(iid[i]) j = itoj[ii] c[i,", "int(iid[i]) j = itoj[ii] c[i, 11] = j c[i, 12] = masss[j] c[i,", "# print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m,", "y0 {numpy float} -- [description] n {int} -- [description] Returns: (numpy, numpy) --", "m = np.array([[float(x) for x in line.split(',')] for line in lines]) mid =", "ii = int(iid[i]) j = itoj[ii] c[i, 11] = j c[i, 12] =", "25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0,", "n_a = m[index, 8] δ_a = m[index, 9] g_u = m[index, 10] g_l", "[] for i in range(14): 
s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir):", "numpy as np import os, sys from timeit import default_timer as timer import", "/ 1.0e-2 * 1.0e-5 # cm => m, bar => pascal γ_s =", "2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T", "[description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths =", "/ ν index1 = np.where(λ >= lmin, True, False) index2 = np.where(λ <=", "[type] -- [description] \"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0,", "itoj = [9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,", "+ p2) * 1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0,", "7 8 9 0 11 12 itoj = [9, 0, 1, 2, 3,", "E_l + ΔE_ul n = A.shape[0] c = np.zeros((n, 14)) c[:, 0] =", "h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0,", "m[:, 1] ν = m[:, 2] ν = ν / 1.0e-2 λ =", "= ν_l / 1.0e-2 # cm => m, bar => pascal γ_a =", "-- number of T,Q pairs Returns: T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir,", "np.array([[float(x) for x in line.split()] for line in lines]) TT = np.array(TQ[:,0]) dT", "isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1 =", "ein.read().splitlines() TQ = np.array([[float(x) for x in line.split()] for line in lines]) TT", "j = 0 for i in range(n): xx = x[i] while not (x0[j]", "# 0 1 2 3 4 5 6 7 8 9 10 11", "3, 4, 5, 6, 7, 8, 10, 10, 11] for i in range(n):", "802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 =", "* y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the", "equidistant grid with n grid points Arguments: x0 {numpy float} -- [description] y0", "values (HITRAN data) n {int} -- number of T,Q pairs Returns: T, Q", "372.0, 324.0, 281.0, 243.0, 209.0] h2 = [ 13.0, 14.0, 15.0, 
16.0, 17.0,", "6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines", "m, bar => pascal γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm", "default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate", "paths = [] isotope_id = [] isotope_c = [] isotope_m = [] gis", "111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951,", "17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7,", "CO2_Q_file {str} -- file with T, Q values (HITRAN data) n {int} --", "for x in line.split(',')] for line in lines]) mid = m[:, 0] iid", "-- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths", "1.0e3 p_p = np.array(p1 + p2) * 1.0e2 h_p, p_p = interpolate(h_p, p_p,", "print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index])", "c0 * ν_l E_u = E_l + ΔE_ul n = A.shape[0] c =", "{int} -- [description] Returns: (numpy, numpy) -- [description] \"\"\" x0 = np.array(x0) y0", "-- [description] n {int} -- [description] Returns: (numpy, numpy) -- [description] \"\"\" x0", "ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c = []", "= iid[index] S = m[index, 3] A = m[index, 4] γ_a = m[index,", "Q(296 K) Q (full range) gi 7 1 12C16O2 626 0.984204 43.98983 286.09", "δ_a = m[index, 9] g_u = m[index, 10] g_l = m[index, 11] ν_l", "= np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\")", "gis): h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r')", "cm => m, bar => pascal δ_a = δ_a / 1.0e-2 * 1.0e-5", "= m[index, 7] n_a = m[index, 8] δ_a = m[index, 9] g_u =", "= line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) 
isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) *", "c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths,", "x0[j]) y[i] = (1.0 - v) * y0[j] + v * y0[j+1] return", "13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in range(30,50): s", "Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns:", "os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0)", "QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n", "mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription file", "- x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0 - v) * y0[j]", "E_l = h * c0 * ν_l E_u = E_l + ΔE_ul n", "426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 = [ 13.0, 14.0, 15.0, 16.0,", "11] for i in range(n): ii = int(iid[i]) j = itoj[ii] c[i, 11]", "h_T T[:,1] = T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path,", "np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] =", "dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) < 1.0: # print(TT)", "os import numpy as np import os, sys from timeit import default_timer as", "(x0[j+1] - x0[j]) y[i] = (1.0 - v) * y0[j] + v *", "= np.array(h1 + h2) * 1.0e3 p_p = np.array(p1 + p2) * 1.0e2", "if (j > n0-2): break j = min(j, n0-2) v = (x[i] -", "A = m[index, 4] γ_a = m[index, 5] γ_s = m[index, 6] ν_l", "isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\")", "ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')] for line", "= float(ls[5]) * 
mass_factor isotope_m.append(mass) T = [] Q = [] # read", "lines]) mid = m[:, 0] iid = m[:, 1] ν = m[:, 2]", "= h * c0 * ν_l E_u = E_l + ΔE_ul n =", "3] A = m[index, 4] γ_a = m[index, 5] γ_s = m[index, 6]", "Mass /g·mol-1 Q(296 K) Q (full range) gi 7 1 12C16O2 626 0.984204", "< 1.0: # print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax, True,", "TQ[:,0] = T[0] for i in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3:", "E_u = E_l + ΔE_ul n = A.shape[0] c = np.zeros((n, 14)) c[:,", "1] ν = m[:, 2] ν = ν / 1.0e-2 λ = 1.0", "from timeit import default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0,", "= np.zeros((n, m + 1), np.double) TQ[:,0] = T[0] for i in range(m):", "{numpy float} -- [description] y0 {numpy float} -- [description] n {int} -- [description]", "= 1.0e-3/6.02214076e23 # read the dexription file for i, line in enumerate(lines): ls", "153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33,", "9 10 11 # 1 2 3 4 5 6 7 8 9", "215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n)", "T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0],", "7 8 9 10 11 # 1 2 3 4 5 6 7", "3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0,", "create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis =", "12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0,", "code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range) gi 7 1", "np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis): h =", "λ = λ[index] iid = iid[index] S = m[index, 3] A = m[index,", "8 9 0 11 12 itoj = [9, 0, 1, 2, 3, 4,", "= 0 for i in range(n): xx = x[i] while not (x0[j] <=", "K) Q (full range) gi 7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt", "p2) 
* 1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0,", "= [] # mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read", "[] # mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the", "Arguments: CO2_Q_file {str} -- file with T, Q values (HITRAN data) n {int}", "interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3", "open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines = ein.read().splitlines() TQ = np.array([[float(x) for x", "/ 1.0e-2 λ = 1.0 / ν index1 = np.where(λ >= lmin, True,", "i in range(n): xx = x[i] while not (x0[j] <= xx and x[i]", "A c[:, 5] = γ_a c[:, 6] = γ_s c[:, 7] = n_a", "j in range(30,50): s = [] for i in range(14): s.append(\"%12.6e\" % c[j,", "1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin, lmax, isotope_id, isotope_c, isotope_m,", "γ_a = m[index, 5] γ_s = m[index, 6] ν_l = m[index, 7] n_a", "j += 1 if (j > n0-2): break j = min(j, n0-2) v", "1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file", "-- [description] y0 {numpy float} -- [description] n {int} -- [description] Returns: (numpy,", "not (x0[j] <= xx and x[i] <= x0[j+1]): j += 1 if (j", "= np.array(p1 + p2) * 1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T", "= j c[i, 12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file +", "= 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5", "λ = 1.0 / ν index1 = np.where(λ >= lmin, True, False) index2", "Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range) gi 7 1 12C16O2", "c) for j in range(30,50): s = [] for i in range(14): s.append(\"%12.6e\"", "in range(14): s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir =", "[description] \"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0,", "TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) <", "γ_a c[:, 6] = γ_s c[:, 7] = n_a c[:, 8] = δ_a", "= np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) < 1.0:", "= np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8,", "iid = m[:, 1] ν = m[:, 2] ν = ν / 1.0e-2", "= δ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal", "= n_a c[:, 8] = δ_a c[:, 9] = g_u c[:, 10] =", "/ 1.0e-2 # cm => m, bar => pascal γ_a = γ_a /", "h * c0 / λ E_l = h * c0 * ν_l E_u", "ν_l = m[index, 7] n_a = m[index, 8] δ_a = m[index, 9] g_u", "# read the dexription file for i, line in enumerate(lines): ls = line.split()", "λ[index] iid = iid[index] S = m[index, 3] A = m[index, 4] γ_a", "np.array(h1 + h2) * 1.0e3 p_p = np.array(p1 + p2) * 1.0e2 h_p,", "= T[0].shape[0] m = len(T) TQ = np.zeros((n, m + 1), np.double) TQ[:,0]", "lmin, True, False) index2 = np.where(λ <= lmax, True, False) index = index1*index2", "* ν_l E_u = E_l + ΔE_ul n = A.shape[0] c = np.zeros((n,", "59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n = 100", "8 9 10 11 # 1 2 3 4 5 6 7 8", "+ v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables", "25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9,", "n = 100 h_p = np.array(h1 + h2) * 1.0e3 p_p = np.array(p1", "np import os, sys from timeit import default_timer as timer import platform script_root", "T[:,0] = h_T T[:,1] = T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path,", "Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range) gi", "= len(T) TQ = np.zeros((n, m + 1), np.double) TQ[:,0] = T[0] for", "= m[index, 8] δ_a = m[index, 9] g_u = m[index, 10] g_l =", "isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T = [] Q", "isotope_m = [] gis = [] # mass[kg] => g/mol * mass_factor mass_factor", 
"'r') as ein: lines = ein.read().splitlines() TQ = np.array([[float(x) for x in line.split()]", "<= lmax, True, False) index = index1*index2 λ = λ[index] iid = iid[index]", "print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis", "2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 =", "TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) < 1.0: # print(TT) QQ =", "function files for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines", "= T[0] for i in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: #", "gi 7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str}", "[179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52,", "onto a equidistant grid with n grid points Arguments: x0 {numpy float} --", "or np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT <", "\".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x in", "ν = m[:, 2] ν = ν / 1.0e-2 λ = 1.0 /", "function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code Abundance Molar Mass /g·mol-1", "δ_a c[:, 9] = g_u c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i,", "8] δ_a = m[index, 9] g_u = m[index, 10] g_l = m[index, 11]", "h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,", "lids, abus, masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file", "γ_s c[:, 7] = n_a c[:, 8] = δ_a c[:, 9] = g_u", "= interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2),", "m[:, 2] ν = ν / 1.0e-2 λ = 1.0 / ν index1", "CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax", "30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7,", "lmin, lmax, lids, abus, masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8 with", "gis def 
make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1 = [", "m[index, 9] g_u = m[index, 10] g_l = m[index, 11] ν_l = ν_l", "+ ΔE_ul n = A.shape[0] c = np.zeros((n, 14)) c[:, 0] = λ", "1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 11] for i", "c[:, 5] = γ_a c[:, 6] = γ_s c[:, 7] = n_a c[:,", "ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c = [] isotope_m = []", "(j > n0-2): break j = min(j, n0-2) v = (x[i] - x0[j])", "Arguments: Returns: [type] -- [description] \"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0,", "= np.array([[float(x) for x in line.split(',')] for line in lines]) mid = m[:,", "g_u = m[index, 10] g_l = m[index, 11] ν_l = ν_l / 1.0e-2", "np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0]", "626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file with T,", "range(n): ii = int(iid[i]) j = itoj[ii] c[i, 11] = j c[i, 12]", ">= lmin, True, False) index2 = np.where(λ <= lmax, True, False) index =", "= masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j", "v = (x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0 -", "3 4 5 6 7 8 9 10 11 # 1 2 3", "1.0e-2 λ = 1.0 / ν index1 = np.where(λ >= lmin, True, False)", "= make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax", "= S c[:, 4] = A c[:, 5] = γ_a c[:, 6] =", "isotope_c = [] isotope_m = [] gis = [] # mass[kg] => g/mol", "0] iid = m[:, 1] ν = m[:, 2] ν = ν /", "= Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments:", "p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis): h = 6.62607004e-34", "p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0,", "/ λ E_l = h * c0 * ν_l E_u = E_l +", "Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition function 
https://hitran.org/docs/iso-meta/ global ID local", "35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2,", "interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double)", "in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1]", "= np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for i in range(n): xx", "isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path,", "h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] = T_T p[:,0] = h_p", "= 100 h_p = np.array(h1 + h2) * 1.0e3 p_p = np.array(p1 +", "T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T =", "'r') as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')]", "* mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription file for i, line", "= [9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 10,", "T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path", "= np.zeros(n) j = 0 for i in range(n): xx = x[i] while", "218.1]) h_T, T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p", "50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0,", "=> m, bar => pascal γ_a = γ_a / 1.0e-2 * 1.0e-5 #", "interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant grid with n grid points", "y[i] = (1.0 - v) * y0[j] + v * y0[j+1] return x,", "T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines =", "A.shape[0] c = np.zeros((n, 14)) c[:, 0] = λ c[:, 1] = E_l", "points Arguments: x0 {numpy float} -- [description] y0 {numpy float} -- [description] n", "file for i, line in enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1]))", "in line.split(',')] for line in lines]) mid = 
m[:, 0] iid = m[:,", "isotope_id = [] isotope_c = [] isotope_m = [] gis = [] #", "< Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ", "np.where(λ >= lmin, True, False) index2 = np.where(λ <= lmax, True, False) index", "<filename>radinput/create_npy_files.py import os import numpy as np import os, sys from timeit import", "the partition function files for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as", "line.split(',')] for line in lines]) mid = m[:, 0] iid = m[:, 1]", "= np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T,", "p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss,", "isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\"", "E_u c[:, 3] = S c[:, 4] = A c[:, 5] = γ_a", "masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in", "pascal δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm => m, bar", "n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T =", "def interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant grid with n grid", "10 11 # 1 2 3 4 5 6 7 8 9 0", "CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code Abundance Molar", "10] g_l = m[index, 11] ν_l = ν_l / 1.0e-2 # cm =>", "grid points Arguments: x0 {numpy float} -- [description] y0 {numpy float} -- [description]", "14)) c[:, 0] = λ c[:, 1] = E_l c[:, 2] = E_u", "make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition function https://hitran.org/docs/iso-meta/ global ID", "np.where(λ <= lmax, True, False) index = index1*index2 λ = λ[index] iid =", "1.0e-3/6.02214076e23 # read the dexription file for i, line in enumerate(lines): ls =", "import os, sys from timeit import default_timer as timer import platform 
script_root =", "= np.array([[float(x) for x in line.split()] for line in lines]) TT = np.array(TQ[:,0])", "Q values (HITRAN data) n {int} -- number of T,Q pairs Returns: T,", "= h_T T[:,1] = T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T)", "open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x)", "= [] for i in range(14): s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def", "i, line in enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7])", "13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1,", "Q (full range) gi 7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1", "np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T,", "- T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis def", "script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant grid", "grid with n grid points Arguments: x0 {numpy float} -- [description] y0 {numpy", "ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')] for line in lines]) mid", "1.0 / ν index1 = np.where(λ >= lmin, True, False) index2 = np.where(λ", "{numpy float} -- [description] n {int} -- [description] Returns: (numpy, numpy) -- [description]", "= np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0]", "γ_s = m[index, 6] ν_l = m[index, 7] n_a = m[index, 8] δ_a", "10, 10, 11] for i in range(n): ii = int(iid[i]) j = itoj[ii]", "= [] # read the partition function files for path in paths: with", "lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin, lmax, isotope_id,", "lines = 
ein.read().splitlines() TQ = np.array([[float(x) for x in line.split()] for line in", "c[:, 8] = δ_a c[:, 9] = g_u c[:, 10] = g_l i", "n): \"\"\"Interpolate data onto a equidistant grid with n grid points Arguments: x0", "TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file =", "[] isotope_id = [] isotope_c = [] isotope_m = [] gis = []", "[] Q = [] # read the partition function files for path in", "> n0-2): break j = min(j, n0-2) v = (x[i] - x0[j]) /", "2, 3, 4, 5, 6, 7, 8, 10, 10, 11] for i in", "TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path =", "* 1.0e-5 # cm => m, bar => pascal ΔE_ul = h *", "Returns: T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines", "as ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c =", "209.0] h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0,", "y = np.zeros(n) j = 0 for i in range(n): xx = x[i]", "40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5,", "np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) < 1.0: #", "ν index1 = np.where(λ >= lmin, True, False) index2 = np.where(λ <= lmax,", "3 4 5 6 7 8 9 0 11 12 itoj = [9,", "300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file", "TQ = np.array([[float(x) for x in line.split()] for line in lines]) TT =", "* 1.0e-5 # cm => m, bar => pascal γ_s = γ_s /", "c[:, 2] = E_u c[:, 3] = S c[:, 4] = A c[:,", "mass_factor = 1.0e-3/6.02214076e23 # read the dexription file for i, line in enumerate(lines):", "bar => pascal δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm =>", "= 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin, lmax, isotope_id, isotope_c,", "pairs Returns: T, Q -- [description] \"\"\" with 
open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein:", "γ_s / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal δ_a", "partition function files for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein:", "i in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i])))", "γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm => m, bar =>", "for line in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) >", "16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0,", "[ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0,", "import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto a", "import os import numpy as np import os, sys from timeit import default_timer", "1 2 3 4 5 6 7 8 9 0 11 12 itoj", "#print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c,", "\"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id = []", "x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition function", "n grid points Arguments: x0 {numpy float} -- [description] y0 {numpy float} --", "gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T = [] Q = []", "make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax =", "partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code Abundance Molar Mass", "281.0, 243.0, 209.0] h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,", "= min(j, n0-2) v = (x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i]", "[] gis = [] # mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23", "as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')] for", 
"import default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n):", "1.0e-2 * 1.0e-5 # cm => m, bar => pascal ΔE_ul = h", "n_a c[:, 8] = δ_a c[:, 9] = g_u c[:, 10] = g_l", "q7.txt 1 Arguments: CO2_Q_file {str} -- file with T, Q values (HITRAN data)", "81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n", "m = len(T) TQ = np.zeros((n, m + 1), np.double) TQ[:,0] = T[0]", "= h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin,", "= T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def", "gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin,", "s = [] for i in range(14): s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s))", "= abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in range(30,50): s =", "=> m, bar => pascal γ_s = γ_s / 1.0e-2 * 1.0e-5 #", "= np.zeros((n, 14)) c[:, 0] = λ c[:, 1] = E_l c[:, 2]", "- x0[j]) y[i] = (1.0 - v) * y0[j] + v * y0[j+1]", "mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription file for i, line in", "iid = iid[index] S = m[index, 3] A = m[index, 4] γ_a =", "7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0,", "18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2", "h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as", "= γ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal", "1.0e-2 * 1.0e-5 # cm => m, bar => pascal δ_a = δ_a", "global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass)", "isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T = []", 
"x0 {numpy float} -- [description] y0 {numpy float} -- [description] n {int} --", "n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for", "= m[index, 10] g_l = m[index, 11] ν_l = ν_l / 1.0e-2 #", "+= 1 if (j > n0-2): break j = min(j, n0-2) v =", "def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis): h = 6.62607004e-34 c0", "= ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')] for line in lines])", "1), np.double) TQ[:,0] = T[0] for i in range(m): #if np.abs(np.sum(T[0] - T[i]))", "6 7 8 9 0 11 12 itoj = [9, 0, 1, 2,", "h_p = np.array(h1 + h2) * 1.0e3 p_p = np.array(p1 + p2) *", "Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:]", "m, bar => pascal γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm", "for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines = ein.read().splitlines()", "= os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax =", "in range(30,50): s = [] for i in range(14): s.append(\"%12.6e\" % c[j, i])", "69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n =", "= m[index, 9] g_u = m[index, 10] g_l = m[index, 11] ν_l =", "0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file with T, Q", "Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ =", "= os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant grid with", "Molar Mass /g·mol-1 Q(296 K) Q (full range) gi 7 1 12C16O2 626", "T, Q values (HITRAN data) n {int} -- number of T,Q pairs Returns:", "{int} -- number of T,Q pairs Returns: T, Q -- [description] \"\"\" with", "m[index, 7] n_a = m[index, 8] δ_a = m[index, 9] g_u = m[index,", "file with T, Q values (HITRAN data) n {int} -- number of T,Q", "1.0e-2 # cm => m, bar => pascal γ_a = γ_a / 
1.0e-2", "= np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m =", "= os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] = T_T", "m[:, 0] iid = m[:, 1] ν = m[:, 2] ν = ν", "isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ)", "11 # 1 2 3 4 5 6 7 8 9 0 11", "= os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir,", "= E_l c[:, 2] = E_u c[:, 3] = S c[:, 4] =", "as np import os, sys from timeit import default_timer as timer import platform", "y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2", "= [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0,", "12 itoj = [9, 0, 1, 2, 3, 4, 5, 6, 7, 8,", "= A c[:, 5] = γ_a c[:, 6] = γ_s c[:, 7] =", "np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in range(30,50): s = [] for", "269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2),", "c[i, 12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c)", "\".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m,", "os, sys from timeit import default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__))", "os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant grid with n", "= γ_s / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal", "index1*index2 λ = λ[index] iid = iid[index] S = m[index, 3] A =", "path), 'r') as ein: lines = ein.read().splitlines() TQ = np.array([[float(x) for x in", "\"\"\"Interpolate data onto a equidistant grid with n grid points Arguments: x0 {numpy", "-- 
[description] Returns: (numpy, numpy) -- [description] \"\"\" x0 = np.array(x0) y0 =", "hitran_file + \".npy\"), c) for j in range(30,50): s = [] for i", "[1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0]", "g_u c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1", "11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0,", "=> g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription file for", "45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5,", "Returns: [type] -- [description] \"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0,", "files for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines =", "1 if (j > n0-2): break j = min(j, n0-2) v = (x[i]", "# cm => m, bar => pascal γ_a = γ_a / 1.0e-2 *", "(HITRAN data) n {int} -- number of T,Q pairs Returns: T, Q --", "index = np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m", "30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0,", "lmax, lids, abus, masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir,", "S = m[index, 3] A = m[index, 4] γ_a = m[index, 5] γ_s", "+ 1), np.double) TQ[:,0] = T[0] for i in range(m): #if np.abs(np.sum(T[0] -", "= x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for i", "m[index, 3] A = m[index, 4] γ_a = m[index, 5] γ_s = m[index,", "(x0[j] <= xx and x[i] <= x0[j+1]): j += 1 if (j >", "s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\")", "\"\"\" x0 = np.array(x0) y0 = np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j]", "2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m", "10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0,", 
"c[:, 0] = λ c[:, 1] = E_l c[:, 2] = E_u c[:,", "False) index = index1*index2 λ = λ[index] iid = iid[index] S = m[index,", "# 1 2 3 4 5 6 7 8 9 0 11 12", "v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for", "50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T,", "233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0],", "= γ_s c[:, 7] = n_a c[:, 8] = δ_a c[:, 9] =", "def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis", "return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition", "bar => pascal γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm =>", "c[:, 6] = γ_s c[:, 7] = n_a c[:, 8] = δ_a c[:,", "= m[:, 2] ν = ν / 1.0e-2 λ = 1.0 / ν", "c = np.zeros((n, 14)) c[:, 0] = λ c[:, 1] = E_l c[:,", "7] = n_a c[:, 8] = δ_a c[:, 9] = g_u c[:, 10]", "= λ c[:, 1] = E_l c[:, 2] = E_u c[:, 3] =", "(1.0 - v) * y0[j] + v * y0[j+1] return x, y def", "= int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T", "make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1 = [ 0.0, 1.0,", "c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2", "c[:, 9] = g_u c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:])", "p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T", "4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0,", "in lines]) mid = m[:, 0] iid = m[:, 1] ν = m[:,", "/g·mol-1 Q(296 K) Q (full range) gi 7 1 12C16O2 626 0.984204 43.98983", "for i in range(14): s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir)", "[9, 0, 1, 2, 3, 4, 5, 
6, 7, 8, 10, 10, 11]", "ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range)", "range) gi 7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file", "1 Arguments: CO2_Q_file {str} -- file with T, Q values (HITRAN data) n", "np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir,", "13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0,", "break j = min(j, n0-2) v = (x[i] - x0[j]) / (x0[j+1] -", "in paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines = ein.read().splitlines() TQ =", "5] = γ_a c[:, 6] = γ_s c[:, 7] = n_a c[:, 8]", "np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3 4 5 6 7 8", "with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id", "i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3 4 5 6", "T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir):", "14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0,", "c[:, 3] = S c[:, 4] = A c[:, 5] = γ_a c[:,", "0 11 12 itoj = [9, 0, 1, 2, 3, 4, 5, 6,", "1.0e-5 # cm => m, bar => pascal γ_s = γ_s / 1.0e-2", "= E_l + ΔE_ul n = A.shape[0] c = np.zeros((n, 14)) c[:, 0]", "2 3 4 5 6 7 8 9 10 11 # 1 2", "6, 7, 8, 10, 10, 11] for i in range(n): ii = int(iid[i])", "c[:, 1] = E_l c[:, 2] = E_u c[:, 3] = S c[:,", "= λ[index] iid = iid[index] S = m[index, 3] A = m[index, 4]", "p_p = np.array(p1 + p2) * 1.0e2 h_p, p_p = interpolate(h_p, p_p, n)", "554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 = [ 13.0, 14.0,", "γ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal γ_s", "8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0,", "T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis): h", "i in range(14): s.append(\"%12.6e\" % c[j, i]) 
#print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir", "data_dir, lmin, lmax, lids, abus, masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8", "isotope_m.append(mass) T = [] Q = [] # read the partition function files", "- v) * y0[j] + v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir,", "/ (x0[j+1] - x0[j]) y[i] = (1.0 - v) * y0[j] + v", "T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir,", "[description] n {int} -- [description] Returns: (numpy, numpy) -- [description] \"\"\" x0 =", "/ 1.0e-2 * 1.0e-5 # cm => m, bar => pascal δ_a =", "paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path = os.path.join(data_dir,", "ID local ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q", "= 1.0 / ν index1 = np.where(λ >= lmin, True, False) index2 =", "= TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT) < 1.0: # print(TT) QQ", "h_p, p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0,", "ΔE_ul n = A.shape[0] c = np.zeros((n, 14)) c[:, 0] = λ c[:,", "T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file,", "= [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2,", "index = index1*index2 λ = λ[index] iid = iid[index] S = m[index, 3]", "12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file with", "tables for the CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL", "= np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n =", "range(14): s.append(\"%12.6e\" % c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir,", "index2 = np.where(λ <= lmax, True, False) index = index1*index2 λ = λ[index]", "3.33, 1.76, 0.951, 0.067] n = 
100 h_p = np.array(h1 + h2) *", "{str} -- file with T, Q values (HITRAN data) n {int} -- number", "+ h2) * 1.0e3 p_p = np.array(p1 + p2) * 1.0e2 h_p, p_p", "4 5 6 7 8 9 0 11 12 itoj = [9, 0,", "= ein.read().splitlines() TQ = np.array([[float(x) for x in line.split()] for line in lines])", "= (x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0 - v)", "* 1.0e-5 # cm => m, bar => pascal δ_a = δ_a /", "x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for i in range(n):", "data) n {int} -- number of T,Q pairs Returns: T, Q -- [description]", "xx = x[i] while not (x0[j] <= xx and x[i] <= x0[j+1]): j", "mid = m[:, 0] iid = m[:, 1] ν = m[:, 2] ν", "215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n) T", "range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] =", "* mass_factor isotope_m.append(mass) T = [] Q = [] # read the partition", "* 1.0e3 p_p = np.array(p1 + p2) * 1.0e2 h_p, p_p = interpolate(h_p,", "21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0,", "lmax, True, False) index = index1*index2 λ = λ[index] iid = iid[index] S", "= m[index, 11] ν_l = ν_l / 1.0e-2 # cm => m, bar", "in range(n): xx = x[i] while not (x0[j] <= xx and x[i] <=", "243.0, 209.0] h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0,", "iid[index] S = m[index, 3] A = m[index, 4] γ_a = m[index, 5]", "c0 / λ E_l = h * c0 * ν_l E_u = E_l", "x0[j+1]): j += 1 if (j > n0-2): break j = min(j, n0-2)", "for i, line in enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4]))", "ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5])", "for line in lines]) mid = m[:, 0] iid = m[:, 1] ν", "10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3", "TQ = np.zeros((n, m + 1), np.double) TQ[:,0] = T[0] for i in", "= np.where(λ >= lmin, True, 
False) index2 = np.where(λ <= lmax, True, False)", "δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm => m, bar =>", "324.0, 281.0, 243.0, 209.0] h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,", "= os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] = T_T p[:,0] = h_p p[:,1]", "masss, gis): h = 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"),", "1.76, 0.951, 0.067] n = 100 h_p = np.array(h1 + h2) * 1.0e3", "* y0[j] + v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create", "p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\")", "TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin,", "δ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal ΔE_ul", "[] isotope_m = [] gis = [] # mass[kg] => g/mol * mass_factor", "= E_u c[:, 3] = S c[:, 4] = A c[:, 5] =", "6] ν_l = m[index, 7] n_a = m[index, 8] δ_a = m[index, 9]", "=> pascal δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm => m,", "T[:,1] = T_T p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p)", "i in range(n): ii = int(iid[i]) j = itoj[ii] c[i, 11] = j", "-- file with T, Q values (HITRAN data) n {int} -- number of", "y0 = np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j", "np.zeros((n, m + 1), np.double) TQ[:,0] = T[0] for i in range(m): #if", "<= x0[j+1]): j += 1 if (j > n0-2): break j = min(j,", "for the CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code", "\"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file,", "= (1.0 - v) * y0[j] + v * y0[j+1] return x, y", "T[0] for i in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0]", "m[index, 11] ν_l = ν_l / 1.0e-2 # cm => m, bar =>", 
"275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double)", "= m[index, 3] A = m[index, 4] γ_a = m[index, 5] γ_s =", "make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis): h = 6.62607004e-34 c0 =", "T = [] Q = [] # read the partition function files for", "/ 1.0e-2 * 1.0e-5 # cm => m, bar => pascal ΔE_ul =", "\"h_p.npy\") T[:,0] = h_T T[:,1] = T_T p[:,0] = h_p p[:,1] = p_p", "False) index2 = np.where(λ <= lmax, True, False) index = index1*index2 λ =", "T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ, paths,", "710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 = [", "np.array(x0) y0 = np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n)", "15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0,", "12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for", "import numpy as np import os, sys from timeit import default_timer as timer", "6] = γ_s c[:, 7] = n_a c[:, 8] = δ_a c[:, 9]", "4] γ_a = m[index, 5] γ_s = m[index, 6] ν_l = m[index, 7]", "= m[:, 1] ν = m[:, 2] ν = ν / 1.0e-2 λ", "np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return", "np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir,", "- T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ,", "https://hitran.org/docs/iso-meta/ global ID local ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296", "\"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths = []", "= [] isotope_c = [] isotope_m = [] gis = [] # mass[kg]", "np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7,", "=> pascal ΔE_ul = h * c0 / λ E_l = h *", "0 1 
2 3 4 5 6 7 8 9 10 11 #", "11] = j c[i, 12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file", "[description] Returns: (numpy, numpy) -- [description] \"\"\" x0 = np.array(x0) y0 = np.array(y0)", "3] = S c[:, 4] = A c[:, 5] = γ_a c[:, 6]", "y0[j] + v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup", "2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path =", "h_T, T_T = interpolate(h_T, T_T, n) T = np.zeros((h_T.shape[0], 2), np.double) p =", "itoj[ii] c[i, 11] = j c[i, 12] = masss[j] c[i, 13] = abus[j]", "return TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type]", "+ \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x", "#if np.amax(dT) > 1.0 or np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1])", "a equidistant grid with n grid points Arguments: x0 {numpy float} -- [description]", "of T,Q pairs Returns: T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r')", "13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n = 100 h_p = np.array(h1 +", "np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis):", "pascal γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm => m, bar", "\"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1 = [ 0.0, 1.0, 2.0,", "p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids,", "np.double) h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1]", "27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n = 100 h_p = np.array(h1", "c[:, 7] = n_a c[:, 8] = δ_a c[:, 9] = g_u c[:,", "225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T = interpolate(h_T, T_T, n) T =", "\"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") 
T[:,0] = h_T T[:,1] = T_T p[:,0] =", "#print(i, c[i,:]) # 0 1 2 3 4 5 6 7 8 9", "line in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0", "x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0 - v) * y0[j] +", "7, 8, 10, 10, 11] for i in range(n): ii = int(iid[i]) j", "9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0,", "m[index, 4] γ_a = m[index, 5] γ_s = m[index, 6] ν_l = m[index,", "paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description]", "n {int} -- [description] Returns: (numpy, numpy) -- [description] \"\"\" x0 = np.array(x0)", "float} -- [description] n {int} -- [description] Returns: (numpy, numpy) -- [description] \"\"\"", "1.0e-5 # cm => m, bar => pascal ΔE_ul = h * c0", "(x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0 - v) *", "c[:, 4] = A c[:, 5] = γ_a c[:, 6] = γ_s c[:,", "= [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0,", "0.951, 0.067] n = 100 h_p = np.array(h1 + h2) * 1.0e3 p_p", "TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] --", "timeit import default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0,", "ν = ν / 1.0e-2 λ = 1.0 / ν index1 = np.where(λ", "g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription file for i,", "y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition function https://hitran.org/docs/iso-meta/", "False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ = np.zeros((n, m", "with n grid points Arguments: x0 {numpy float} -- [description] y0 {numpy float}", "np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for i in range(n): xx =", "n {int} -- number of T,Q pairs Returns: T, Q -- [description] \"\"\"", "path in paths: with open(os.path.join(CO2_Q_dir, path), 'r') 
as ein: lines = ein.read().splitlines() TQ", "in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or", "0.067] n = 100 h_p = np.array(h1 + h2) * 1.0e3 p_p =", "h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,", "bar => pascal ΔE_ul = h * c0 / λ E_l = h", "= A.shape[0] c = np.zeros((n, 14)) c[:, 0] = λ c[:, 1] =", "c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"), c) for j in range(30,50):", "= np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j =", "c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines =", "enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass =", "for i in range(m): #if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] -", "1 2 3 4 5 6 7 8 9 10 11 # 1", "line.split()] for line in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT)", "1.0: # print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False)", "hitran_file + \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m = np.array([[float(x) for", "9] g_u = m[index, 10] g_l = m[index, 11] ν_l = ν_l /", "6 7 8 9 10 11 # 1 2 3 4 5 6", "= itoj[ii] c[i, 11] = j c[i, 12] = masss[j] c[i, 13] =", "numpy) -- [description] \"\"\" x0 = np.array(x0) y0 = np.array(y0) n0 = x0.shape[0]", "1] = E_l c[:, 2] = E_u c[:, 3] = S c[:, 4]", "2] = E_u c[:, 3] = S c[:, 4] = A c[:, 5]", "43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file with T, Q values", "len(T) TQ = np.zeros((n, m + 1), np.double) TQ[:,0] = T[0] for i", "np.where(TT < Tmax, True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T)", "70.0] p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4,", "np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path = 
os.path.join(data_dir, \"h_T.npy\") h_p_path", "λ c[:, 1] = E_l c[:, 2] = E_u c[:, 3] = S", "[] isotope_c = [] isotope_m = [] gis = [] # mass[kg] =>", "min(j, n0-2) v = (x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i] =", "int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T =", "True, False) T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ = np.zeros((n,", "Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ = np.zeros((n, m + 1),", "= g_u c[:, 10] = g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0", "xx and x[i] <= x0[j+1]): j += 1 if (j > n0-2): break", "= ν / 1.0e-2 λ = 1.0 / ν index1 = np.where(λ >=", "<= xx and x[i] <= x0[j+1]): j += 1 if (j > n0-2):", "= np.where(λ <= lmax, True, False) index = index1*index2 λ = λ[index] iid", "m, bar => pascal ΔE_ul = h * c0 / λ E_l =", "mass_factor isotope_m.append(mass) T = [] Q = [] # read the partition function", "m + 1), np.double) TQ[:,0] = T[0] for i in range(m): #if np.abs(np.sum(T[0]", "gis = [] # mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 #", "isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1", "6.52, 3.33, 1.76, 0.951, 0.067] n = 100 h_p = np.array(h1 + h2)", "cm => m, bar => pascal γ_s = γ_s / 1.0e-2 * 1.0e-5", "x in line.split()] for line in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1]", "= np.array(x0) y0 = np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y =", "# mass[kg] => g/mol * mass_factor mass_factor = 1.0e-3/6.02214076e23 # read the dexription", "ΔE_ul = h * c0 / λ E_l = h * c0 *", "read the dexription file for i, line in enumerate(lines): ls = line.split() global_id", "= 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:]", "mass = float(ls[5]) * mass_factor isotope_m.append(mass) T 
= [] Q = [] #", "os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5, 1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\")", "# cm => m, bar => pascal δ_a = δ_a / 1.0e-2 *", "130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76,", "0 for i in range(n): xx = x[i] while not (x0[j] <= xx", "for j in range(30,50): s = [] for i in range(14): s.append(\"%12.6e\" %", "while not (x0[j] <= xx and x[i] <= x0[j+1]): j += 1 if", "n0-2) v = (x[i] - x0[j]) / (x0[j+1] - x0[j]) y[i] = (1.0", "T[0].shape[0] m = len(T) TQ = np.zeros((n, m + 1), np.double) TQ[:,0] =", "70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1]) h_T, T_T", "n) T = np.zeros((h_T.shape[0], 2), np.double) p = np.zeros((h_p.shape[0], 2), np.double) h_T_path =", "902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2", "= g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3 4", "20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0,", "0] = λ c[:, 1] = E_l c[:, 2] = E_u c[:, 3]", "= m[:, 0] iid = m[:, 1] ν = m[:, 2] ν =", "and x[i] <= x0[j+1]): j += 1 if (j > n0-2): break j", "for x in line.split()] for line in lines]) TT = np.array(TQ[:,0]) dT =", "#if np.abs(np.sum(T[0] - T[i])) > 1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i]", "= x[i] while not (x0[j] <= xx and x[i] <= x0[j+1]): j +=", "y0, n): \"\"\"Interpolate data onto a equidistant grid with n grid points Arguments:", "np.array(y0) n0 = x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0", "7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1 Arguments: CO2_Q_file {str} --", "with T, Q values (HITRAN data) n {int} -- number of T,Q pairs", "p[:,0] = h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir,", "pascal γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm => m, bar", "S c[:, 4] = A c[:, 5] = γ_a c[:, 6] = γ_s", "22.0, 25.0, 30.0, 
35.0, 40.0, 45.0, 50.0, 70.0] p2 = [179.0, 153.0, 130.0,", "bar => pascal γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm =>", "95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067]", "np.amax(dT) > 1.0 or np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1]) index", "5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0,", "v) * y0[j] + v * y0[j+1] return x, y def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0):", "h_T_path = os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] =", "line in lines]) mid = m[:, 0] iid = m[:, 1] ν =", "np.array([[float(x) for x in line.split(',')] for line in lines]) mid = m[:, 0]", "index1 = np.where(λ >= lmin, True, False) index2 = np.where(λ <= lmax, True,", "+ \".npy\"), c) for j in range(30,50): s = [] for i in", "h2) * 1.0e3 p_p = np.array(p1 + p2) * 1.0e2 h_p, p_p =", "19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0] p2 =", "= ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c = [] isotope_m =", "x0.shape[0] x = np.mgrid[x0[0]:x0[-1]:n*1j] y = np.zeros(n) j = 0 for i in", "paths: with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines = ein.read().splitlines() TQ = np.array([[float(x)", "in line.split()] for line in lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if", "np.double) TQ[:,0] = T[0] for i in range(m): #if np.abs(np.sum(T[0] - T[i])) >", "[ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,", "= m[index, 6] ν_l = m[index, 7] n_a = m[index, 8] δ_a =", "0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 11] for", "1.81e-5 hitran_file = os.path.join(data_dir, \"CO2_rwfmt_ISO-0-12_wl-12-18-mum\") make_spectrum(hitran_file, data_dir, lmin, lmax, isotope_id, isotope_c, isotope_m, gis)", "g_l i = np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3 4 5", "c[i, 11] = j c[i, 12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir,", "45.0, 50.0, 70.0])*1.0e3 T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 
218.1])", "np.zeros(n) j = 0 for i in range(n): xx = x[i] while not", "open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"), 'r') as ein: lines = ein.read().splitlines()[2:] paths = [] isotope_id =", "Tmax = 300.0) TQ_path = os.path.join(data_dir, \"T_Q.npy\") np.save(TQ_path, TQ) lmin, lmax = 1.19e-5,", "lines]) TT = np.array(TQ[:,0]) dT = TT[1:]-TT[:-1] #if np.amax(dT) > 1.0 or np.amin(dT)", "for i in range(n): ii = int(iid[i]) j = itoj[ii] c[i, 11] =", "in range(n): ii = int(iid[i]) j = itoj[ii] c[i, 11] = j c[i,", "11 12 itoj = [9, 0, 1, 2, 3, 4, 5, 6, 7,", "paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor isotope_m.append(mass) T = [] Q =", "global ID local ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K)", "= [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,", "make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir,", "in enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass", "1.0e-3: # print(np.abs(np.sum(T[0] - T[i]))) TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c,", "h_p p[:,1] = p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax,", "\".npy\"), c) for j in range(30,50): s = [] for i in range(14):", "g_l = m[index, 11] ν_l = ν_l / 1.0e-2 # cm => m,", "os.path.join(data_dir, \"h_T.npy\") h_p_path = os.path.join(data_dir, \"h_p.npy\") T[:,0] = h_T T[:,1] = T_T p[:,0]", "lokup tables for the CO2 partition function https://hitran.org/docs/iso-meta/ global ID local ID Formula", "1.0e-5 # cm => m, bar => pascal δ_a = δ_a / 1.0e-2", "\"\"\" h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,", "i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ, paths, 
isotope_id,", "= δ_a c[:, 9] = g_u c[:, 10] = g_l i = np.argmax(c[:,3])", "5] γ_s = m[index, 6] ν_l = m[index, 7] n_a = m[index, 8]", "sys from timeit import default_timer as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def", "lines = ein.read().splitlines()[1:] m = np.array([[float(x) for x in line.split(',')] for line in", "λ E_l = h * c0 * ν_l E_u = E_l + ΔE_ul", "[description] \"\"\" x0 = np.array(x0) y0 = np.array(y0) n0 = x0.shape[0] x =", "# cm => m, bar => pascal ΔE_ul = h * c0 /", "= p_p np.save(h_T_path, T) np.save(h_p_path, p) def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus,", "h * c0 * ν_l E_u = E_l + ΔE_ul n = A.shape[0]", "> 1.0 or np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1]) index =", "AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range) gi 7", "* 1.0e2 h_p, p_p = interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0,", "m, bar => pascal δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm", "with open(os.path.join(CO2_Q_dir, path), 'r') as ein: lines = ein.read().splitlines() TQ = np.array([[float(x) for", "487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 = [ 13.0, 14.0, 15.0,", "% c[j, i]) #print(\" \".join(s)) def create_npy_data_files(data_dir): make_T_p_over_height(data_dir) CO2_Q_dir = os.path.join(data_dir, \"CO2_Q\") TQ,", "43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n = 100 h_p =", "line in enumerate(lines): ls = line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8]))", "float(ls[5]) * mass_factor isotope_m.append(mass) T = [] Q = [] # read the", "m[index, 6] ν_l = m[index, 7] n_a = m[index, 8] δ_a = m[index,", "4, 5, 6, 7, 8, 10, 10, 11] for i in range(n): ii", "* c0 * ν_l E_u = E_l + ΔE_ul n = A.shape[0] c", "= γ_a c[:, 6] = γ_s c[:, 7] = n_a c[:, 8] =", "with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein: lines = ein.read().splitlines()[1:] m =", "read the partition function 
files for path in paths: with open(os.path.join(CO2_Q_dir, path), 'r')", "= index1*index2 λ = λ[index] iid = iid[index] S = m[index, 3] A", "float} -- [description] y0 {numpy float} -- [description] n {int} -- [description] Returns:", "8, 10, 10, 11] for i in range(n): ii = int(iid[i]) j =", "for i in range(n): xx = x[i] while not (x0[j] <= xx and", "T.append(TT[index]) Q.append(QQ[index]) n = T[0].shape[0] m = len(T) TQ = np.zeros((n, m +", "2] ν = ν / 1.0e-2 λ = 1.0 / ν index1 =", "51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067] n = 100 h_p", "j = itoj[ii] c[i, 11] = j c[i, 12] = masss[j] c[i, 13]", "j = min(j, n0-2) v = (x[i] - x0[j]) / (x0[j+1] - x0[j])", "cm => m, bar => pascal γ_a = γ_a / 1.0e-2 * 1.0e-5", "=> m, bar => pascal δ_a = δ_a / 1.0e-2 * 1.0e-5 #", "as ein: lines = ein.read().splitlines() TQ = np.array([[float(x) for x in line.split()] for", "m[index, 5] γ_s = m[index, 6] ν_l = m[index, 7] n_a = m[index,", "number of T,Q pairs Returns: T, Q -- [description] \"\"\" with open(os.path.join(CO2_Q_dir, \"q7-q122-description.txt\"),", "= [] isotope_id = [] isotope_c = [] isotope_m = [] gis =", "True, False) index2 = np.where(λ <= lmax, True, False) index = index1*index2 λ", "1.0e-2 * 1.0e-5 # cm => m, bar => pascal γ_s = γ_s", "100 h_p = np.array(h1 + h2) * 1.0e3 p_p = np.array(p1 + p2)", "# cm => m, bar => pascal γ_s = γ_s / 1.0e-2 *", "1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1", "platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data onto a equidistant", "628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0] h2 = [ 13.0,", "= m[index, 4] γ_a = m[index, 5] γ_s = m[index, 6] ν_l =", "p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7,", "= 6.62607004e-34 c0 = 2.99792458e8 with open(os.path.join(data_dir, hitran_file + \".out\"), 'r') as ein:", "E_l c[:, 2] = E_u c[:, 3] = S c[:, 4] = A", "ein: lines = ein.read().splitlines() TQ = 
np.array([[float(x) for x in line.split()] for line", "def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0): \"\"\"Create lokup tables for the CO2 partition function https://hitran.org/docs/iso-meta/ global", "6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0] p1 = [1013.0, 902.0, 802.0, 710.0,", "\"CO2_Q\") TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0) TQ_path", "4] = A c[:, 5] = γ_a c[:, 6] = γ_s c[:, 7]", "lines = ein.read().splitlines()[2:] paths = [] isotope_id = [] isotope_c = [] isotope_m", "dexription file for i, line in enumerate(lines): ls = line.split() global_id = int(ls[0])", "ν_l / 1.0e-2 # cm => m, bar => pascal γ_a = γ_a", "ν_l E_u = E_l + ΔE_ul n = A.shape[0] c = np.zeros((n, 14))", "local ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full", "p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0,", "4 5 6 7 8 9 10 11 # 1 2 3 4", "-- [description] \"\"\" x0 = np.array(x0) y0 = np.array(y0) n0 = x0.shape[0] x", "def make_T_p_over_height(data_dir): \"\"\"Create Arguments: Returns: [type] -- [description] \"\"\" h1 = [ 0.0,", "* c0 / λ E_l = h * c0 * ν_l E_u =", "np.amin(dT) < 1.0: # print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax,", "j c[i, 12] = masss[j] c[i, 13] = abus[j] np.save(os.path.join(data_dir, hitran_file + \".npy\"),", "# print(TT) QQ = np.array(TQ[:,1]) index = np.where(TT < Tmax, True, False) T.append(TT[index])", "x in line.split(',')] for line in lines]) mid = m[:, 0] iid =", "8] = δ_a c[:, 9] = g_u c[:, 10] = g_l i =", "TQ[:,i+1] = Q[i] return TQ, paths, isotope_id, isotope_c, isotope_m, gis def make_T_p_over_height(data_dir): \"\"\"Create", "= interpolate(h_p, p_p, n) h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0,", "5 6 7 8 9 0 11 12 itoj = [9, 0, 1,", "Q = [] # read the partition function files for path in paths:", "0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]", "7] n_a = m[index, 8] δ_a = m[index, 9] g_u = m[index, 
10]", "= np.argmax(c[:,3]) #print(i, c[i,:]) # 0 1 2 3 4 5 6 7", "286.09 q7.txt 1 Arguments: CO2_Q_file {str} -- file with T, Q values (HITRAN", "[] # read the partition function files for path in paths: with open(os.path.join(CO2_Q_dir,", "m[index, 8] δ_a = m[index, 9] g_u = m[index, 10] g_l = m[index,", "as timer import platform script_root = os.path.abspath(os.path.dirname(__file__)) def interpolate(x0, y0, n): \"\"\"Interpolate data", "= [] gis = [] # mass[kg] => g/mol * mass_factor mass_factor =", "line.split() global_id = int(ls[0]) isotope_id.append(int(ls[1])) isotope_c.append(float(ls[4])) paths.append(ls[7]) gis.append(int(ls[8])) mass = float(ls[5]) * mass_factor", "n = A.shape[0] c = np.zeros((n, 14)) c[:, 0] = λ c[:, 1]", "9 0 11 12 itoj = [9, 0, 1, 2, 3, 4, 5,", "range(30,50): s = [] for i in range(14): s.append(\"%12.6e\" % c[j, i]) #print(\"", "x[i] while not (x0[j] <= xx and x[i] <= x0[j+1]): j += 1", "= [] isotope_m = [] gis = [] # mass[kg] => g/mol *", "range(n): xx = x[i] while not (x0[j] <= xx and x[i] <= x0[j+1]):", "= [] Q = [] # read the partition function files for path", "ν / 1.0e-2 λ = 1.0 / ν index1 = np.where(λ >= lmin,", "Returns: (numpy, numpy) -- [description] \"\"\" x0 = np.array(x0) y0 = np.array(y0) n0" ]
[ "= file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file =", "\\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file = open(\"linalg.c\",", "s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s", "\"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\");", "= s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s =", "= s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s =", "s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\",", "s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #----------------------------------------------", "file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc", "s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s", "s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file", "linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include", "s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\",", "import os #---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read() file.close() s =", "s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s", "sys import os 
#---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read() file.close() s", "open(\"linalgf.h\", \"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\",", "= open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc =", "s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s", "\"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\");", "file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\");", "= s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close()", "s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\",", "\"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close()", "= s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s =", "= s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s =", "s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s", "file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\");", "s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s", "= s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s =", "file = open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh", "= s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", 
\"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s =", "s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s", "\"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\");", "s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s", "s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s", "\"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\");", "= s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s =", "\"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\");", "s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s", "file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc", "file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read() file.close() s =", "\"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh", "file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh =", "s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s", "s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s", "\"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\");", "\"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", 
\"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\");", "s = file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s", "file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh = linalgfh + linalgdh", "s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\",", "file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s =", "open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close()", "\"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\");", "s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\",", "= s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s =", "s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s", "\"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\");", "file = open(\"linalgf.c\", \"r\") s = file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s", "\"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\");", "\"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file =", "= s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s =", "s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", 
\"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s", "= s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s =", "\"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\");", "\"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close()", "s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s", "\"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file", "linalgfh + linalgdh linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\")", "\"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\");", "= s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s =", "= s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s =", "= linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file =", "\"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\");", "= s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s =", "+ linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file = open(\"linalg.c\", \"w\") file.write(linalgc)", "s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\",", "file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = 
file.read() file.close() file =", "s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\",", "s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\",", "= open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = file.read()", "\"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file", "file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\")", "linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\",", "s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\",", "\"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc", "s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\",", "s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s", "= s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\",", "= s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s =", "s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s", "= linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include 
\\\"linalg.h\\\"\\n#include", "s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s)", "= s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s =", "file.read() file.close() linalgh = linalgfh + linalgdh linalgc = linalgfc + linalgdc linalgc", "\"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\");", "file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh = linalgfh +", "os #---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\",", "s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\",", "s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s", "s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\",", "= s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s =", "\"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh)", "\"r\") linalgdc = file.read() file.close() linalgh = linalgfh + linalgdh linalgc = linalgfc", "\"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file", "s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s", "file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\",", "s.replace(\"v4f_\", \"v4d_\"); 
s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\",", "s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\",", "s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\",", "= linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\",", "s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\",", "s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s", "s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file =", "s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file =", "#---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\");", "s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\",", "s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\",", "= linalgfh + linalgdh linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\",", "s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file", "\"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", 
\"mat4d\");", "\"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\");", "= open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read()", "s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\",", "= s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\",", "open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read()", "s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s", "= linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\",", "s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\",", "<assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\")", "s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\",", "= open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh = linalgfh + linalgdh linalgc", "\"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\");", "python import sys import os #---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read()", "file = open(\"linalgf.h\", \"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s", "\"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s 
= s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\");", "file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\",", "s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\",", "= s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file =", "linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh", "s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\",", "linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\"", "= s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s =", "s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\",", "s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\")", "linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include", "= s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s =", "\\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" +", "s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s", "= s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); 
s = s.replace(\"det2f\", \"det2d\"); s =", "linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close()", "\"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\");", "s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\",", "\"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read() file.close() s", "s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s", "file.close() linalgh = linalgfh + linalgdh linalgc = linalgfc + linalgdc linalgc =", "s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\",", "s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\",", "s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #----------------------------------------------", "file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh = linalgfh", "#---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\");", "open(\"linalgf.c\", \"r\") s = file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\",", "<assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file = open(\"linalg.c\", \"w\")", "#---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\")", "\"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = 
s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\");", "linalgh = linalgfh + linalgdh linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include", "\"r\") s = file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\");", "= file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file =", "open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read() file.close()", "file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\")", "\"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\");", "s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s", "linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\")", "\"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\");", "file.close() #---------------------------------------------- file = open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\",", "\"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\");", "file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s = file.read() file.close() s = s.replace(\"linalgf.h\",", "s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s", "s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = 
s.replace(\"Vec3f\",", "s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\",", "s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\",", "= s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s =", "\"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\");", "open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read()", "= \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file", "= s.replace(\"mat4f\", \"mat4d\"); s = s.replace(\"vec2f\", \"vec2d\"); s = s.replace(\"vec3f\", \"vec3d\"); s =", "s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s", "s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\",", "\"_m2d\"); s = s.replace(\"_m3f\", \"_m3d\"); s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\");", "= open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc =", "s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s = s.replace(\"_m3f\",", "+ linalgdh linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc", "\"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\");", "+ linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc", "= s.replace(\"m2f_\", \"m2d_\"); s = 
s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s =", "linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read() file.close() file", "= s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\"); s = s.replace(\"v4f_\", \"v4d_\"); s =", "linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\")", "= s.replace(\"Mat2f\", \"Mat2d\"); s = s.replace(\"Mat3f\", \"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s =", "linalgdh linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc =", "s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\",", "\"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\") s", "linalgc = linalgfc + linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include", "linalgdc linalgc = linalgc.replace(\"#include \\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc =", "\"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\");", "linalgc file = open(\"linalg.h\", \"w\") file.write(linalgh) file.close() file = open(\"linalg.c\", \"w\") file.write(linalgc) file.close()", "\\\"linalgf.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include \\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc", "\"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\"); s = s.replace(\"v3f_\", \"v3d_\");", "= open(\"linalgf.c\", \"r\") s = file.read() 
file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s =", "import sys import os #---------------------------------------------- file = open(\"linalgf.h\", \"r\") s = file.read() file.close()", "\"r\") linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close()", "s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s", "= s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file =", "\"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\")", "s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s", "\"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\");", "\"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\");", "\"Mat3d\"); s = s.replace(\"Mat4f\", \"Mat4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\");", "s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\") file.write(s)", "open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh = file.read()", "= open(\"linalgf.h\", \"r\") linalgfh = file.read() file.close() file = open(\"linalgd.h\", \"r\") linalgdh =", "linalgdc = file.read() file.close() linalgh = linalgfh + linalgdh linalgc = linalgfc +", "linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc = \"#include \\\"linalg.h\\\"\\n#include <assert.h>\\n\" + linalgc file", "s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", 
\"linear_solver_base_d\"); s", "= s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close()", "= file.read() file.close() file = open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh =", "\"linear_solver_base_d\"); s = s.replace(\"det2f\", \"det2d\"); s = s.replace(\"det3f\", \"det3d\"); file = open(\"linalgd.c\", \"w\")", "= s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\", \"mat2d\"); s =", "\"vec3d\"); s = s.replace(\"vec4f\", \"vec4d\"); s = s.replace(\"cpxf\", \"cpxd\"); s = s.replace(\"v2f_\", \"v2d_\");", "s = s.replace(\"mat2f\", \"mat2d\"); s = s.replace(\"mat3f\", \"mat3d\"); s = s.replace(\"mat4f\", \"mat4d\"); s", "= s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s = s.replace(\"Mat2f\", \"Mat2d\"); s =", "s.replace(\"linsolverf\", \"linsolverd\"); file = open(\"linalgd.h\", \"w\") file.write(s) file.close() #---------------------------------------------- file = open(\"linalgf.c\", \"r\")", "s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\",", "= s.replace(\"linsolverf\", \"linsolverd\"); s = s.replace(\"find_leading_order_f\", \"find_leading_order_d\"); s = s.replace(\"linear_solver_base_f\", \"linear_solver_base_d\"); s =", "file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file = open(\"linalgd.c\",", "= file.read() file.close() linalgh = linalgfh + linalgdh linalgc = linalgfc + linalgdc", "= file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s =", "file.read() file.close() s = s.replace(\"linalgf.h\", \"linalgd.h\"); s = s.replace(\"float\", \"double\"); s = s.replace(\"mat2f\",", "\"\") linalgc = linalgc.replace(\"#include 
\\\"linalgd.h\\\"\\n\", \"\") linalgc = linalgc.replace(\"#include <assert.h>\\n\", \"\") linalgc =", "= open(\"linalgf.h\", \"r\") s = file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s =", "linalgdh = file.read() file.close() file = open(\"linalgf.c\", \"r\") linalgfc = file.read() file.close() file", "open(\"linalgd.c\", \"r\") linalgdc = file.read() file.close() linalgh = linalgfh + linalgdh linalgc =", "\"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\");", "= s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s = s.replace(\"_v4f\", \"_v4d\"); s =", "#!/usr/bin/env python import sys import os #---------------------------------------------- file = open(\"linalgf.h\", \"r\") s =", "s = s.replace(\"_v4f\", \"_v4d\"); s = s.replace(\"m2f_\", \"m2d_\"); s = s.replace(\"m3f_\", \"m3d_\"); s", "= s.replace(\"m3f_\", \"m3d_\"); s = s.replace(\"m4f_\", \"m4d_\"); s = s.replace(\"_m2f\", \"_m2d\"); s =", "= file.read() file.close() s = s.replace(\"LINALGF_H\", \"LINALGD_H\"); s = s.replace(\"float\", \"double\"); s =", "s = s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"Vec2f\", \"Vec2d\"); s = s.replace(\"Vec3f\", \"Vec3d\"); s", "= s.replace(\"_m4f\", \"_m4d\"); s = s.replace(\"linalgf_\", \"linalgd_\"); s = s.replace(\"linsolverf\", \"linsolverd\"); s =", "= s.replace(\"v4f_\", \"v4d_\"); s = s.replace(\"_v2f\", \"_v2d\"); s = s.replace(\"_v3f\", \"_v3d\"); s =" ]
[ "/ \"inputs\" / \"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n') for line", "<reponame>rfrazier716/aoc_2020 from pathlib import Path import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks", "results for part 2 print() # print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope):", "\"\"\" offset = map_x_coord % len(map_line) # module operater for rollover return map_line[offset]=='#'", "tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in the x-cord of the map", "x is > len(map_line) returns: True if a tree is in the path,", "the puzzle - try the 5 given slopes and spit out the total", "product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees =", "otherwise rtype: Bool \"\"\" offset = map_x_coord % len(map_line) # module operater for", "trees_hit if __name__ == \"__main__\": # Load the puzzle import to a map", "zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees hit\") print(f\"Part Two Solution: {product_of_trees}\")", "trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the", "rtype: int \"\"\" trees_hit = 0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for", "strings) starting at the top left until reaching the bottom of the map.", "the map. 
every iteration advances position by <x_step,y_step> and checks if a tree", "of the puzzle - try the 5 given slopes and spit out the", "iterates over a \"map\" (array of strings) starting at the top left until", "map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1", "if a tree is in the path, False otherwise rtype: Bool \"\"\" offset", "of strings) starting at the top left until reaching the bottom of the", "import Path import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree", "try the 5 given slopes and spit out the total product slopes_to_test =", "if a tree is in the x-cord of the map line, looping if", "in the path, False otherwise rtype: Bool \"\"\" offset = map_x_coord % len(map_line)", "= [line.rstrip('\\n') for line in fii] # Strip newline characters # Part one", "top left until reaching the bottom of the map. every iteration advances position", "out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in", "[traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results for", "the puzzle, traverse the map with a 3-1 slope and count trees #", "y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else", "# print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in", "1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ == \"__main__\": # Load", "(array of strings) starting at the top left until reaching the bottom of", "of the puzzle, traverse the map with a 3-1 slope and count trees", "and count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of", "in fii] # Strip newline characters # Part one of the puzzle, traverse", "line, looping if x is > len(map_line) returns: 
True if a tree is", "Solution: {traverse_map(map,3,1)}\") # part two of the puzzle - try the 5 given", "and spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for", "x_step, y_step): \"\"\" iterates over a \"map\" (array of strings) starting at the", "a \"map\" (array of strings) starting at the top left until reaching the", "the map with a 3-1 slope and count trees # encountered print(f\"Part One", "# part two of the puzzle - try the 5 given slopes and", "returns: the total number of Trees hit rtype: int \"\"\" trees_hit = 0", "the path, False otherwise rtype: Bool \"\"\" offset = map_x_coord % len(map_line) #", "count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of the", "of Trees hit rtype: int \"\"\" trees_hit = 0 map_depth = len(map) y_steps", "trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ == \"__main__\":", "the 5 given slopes and spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]]", "looping if x is > len(map_line) returns: True if a tree is in", "part two of the puzzle - try the 5 given slopes and spit", "to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as", "map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\" (array of strings)", "np.prod(trees_hit_per_slope) # print the results for part 2 print() # print a newline", "Bool \"\"\" offset = map_x_coord % len(map_line) # module operater for rollover return", "enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ ==", "5 given slopes and spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope", "the x-cord of the map line, looping if x is > len(map_line) returns:", "the map line, looping if x is > len(map_line) returns: True if a", "and checks if 
a tree is hit returns: the total number of Trees", "line in fii] # Strip newline characters # Part one of the puzzle,", "encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of the puzzle - try", "path, False otherwise rtype: Bool \"\"\" offset = map_x_coord % len(map_line) # module", "= [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results", "print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count}", "is in the x-cord of the map line, looping if x is >", "slopes and spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope)", "if a tree is hit returns: the total number of Trees hit rtype:", "a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees", "return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\" (array of", "slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results for part 2", "slope and count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two", "One Solution: {traverse_map(map,3,1)}\") # part two of the puzzle - try the 5", "puzzle - try the 5 given slopes and spit out the total product", "a tree is in the path, False otherwise rtype: Bool \"\"\" offset =", "fii: map = [line.rstrip('\\n') for line in fii] # Strip newline characters #", "module operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over", "the puzzle import to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\"", "numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in the", "rtype: Bool \"\"\" offset = map_x_coord % len(map_line) # module operater for rollover", "traverse 
the map with a 3-1 slope and count trees # encountered print(f\"Part", "for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees hit\") print(f\"Part", "import to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file)", "# Load the puzzle import to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\"", "total number of Trees hit rtype: int \"\"\" trees_hit = 0 map_depth =", "in the x-cord of the map line, looping if x is > len(map_line)", "bottom of the map. every iteration advances position by <x_step,y_step> and checks if", "\"\"\" iterates over a \"map\" (array of strings) starting at the top left", "len(map_line) returns: True if a tree is in the path, False otherwise rtype:", "# module operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates", "a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as fii:", "as fii: map = [line.rstrip('\\n') for line in fii] # Strip newline characters", "trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of the puzzle", "map = [line.rstrip('\\n') for line in fii] # Strip newline characters # Part", "if __name__ == \"__main__\": # Load the puzzle import to a map input_file", "== \"__main__\": # Load the puzzle import to a map input_file = Path(__file__).resolve().parents[2]", "of the map line, looping if x is > len(map_line) returns: True if", "fii] # Strip newline characters # Part one of the puzzle, traverse the", "len(map_line) # module operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\"", "2 print() # print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope}", "tree is in the x-cord of the map line, looping if x is", "is > len(map_line) returns: True if a tree is in the path, 
False", "= np.prod(trees_hit_per_slope) # print the results for part 2 print() # print a", "left until reaching the bottom of the map. every iteration advances position by", "= len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1 if", "the total number of Trees hit rtype: int \"\"\" trees_hit = 0 map_depth", "Path import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is", "0 return trees_hit if __name__ == \"__main__\": # Load the puzzle import to", "iteration advances position by <x_step,y_step> and checks if a tree is hit returns:", "Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n') for", "slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results for part 2 print() #", "hit returns: the total number of Trees hit rtype: int \"\"\" trees_hit =", "\"inputs\" / \"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n') for line in", "map. 
every iteration advances position by <x_step,y_step> and checks if a tree is", "slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope)", "input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as fii: map =", "else 0 return trees_hit if __name__ == \"__main__\": # Load the puzzle import", "def traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\" (array of strings) starting", "returns: True if a tree is in the path, False otherwise rtype: Bool", "a 3-1 slope and count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") #", "for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\"", "\"\"\" Checks if a tree is in the x-cord of the map line,", "number of Trees hit rtype: int \"\"\" trees_hit = 0 map_depth = len(map)", "+= 1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ == \"__main__\": #", "= [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) #", "- try the 5 given slopes and spit out the total product slopes_to_test", "= Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n')", "# Part one of the puzzle, traverse the map with a 3-1 slope", "y_step): \"\"\" iterates over a \"map\" (array of strings) starting at the top", "newline characters # Part one of the puzzle, traverse the map with a", "one of the puzzle, traverse the map with a 3-1 slope and count", "Load the puzzle import to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" /", "product_of_trees = np.prod(trees_hit_per_slope) # print the results for part 2 print() # print", "puzzle, traverse the map with a 3-1 slope and count trees # encountered", 
"map_x_coord % len(map_line) # module operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step,", "\"map\" (array of strings) starting at the top left until reaching the bottom", "every iteration advances position by <x_step,y_step> and checks if a tree is hit", "advances position by <x_step,y_step> and checks if a tree is hit returns: the", "return trees_hit if __name__ == \"__main__\": # Load the puzzle import to a", "in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results for part 2 print()", "position by <x_step,y_step> and checks if a tree is hit returns: the total", "j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit if", "spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope", "the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test]", "{traverse_map(map,3,1)}\") # part two of the puzzle - try the 5 given slopes", "0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit +=", "at the top left until reaching the bottom of the map. 
every iteration", "[[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print", "Part one of the puzzle, traverse the map with a 3-1 slope and", "operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over a", "Checks if a tree is in the x-cord of the map line, looping", "= range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0", "map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with open(input_file) as fii: map", "part 2 print() # print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of", "total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test] product_of_trees", "def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in the x-cord of the", "the results for part 2 print() # print a newline for slope,hit_count in", "% len(map_line) # module operater for rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step):", "Strip newline characters # Part one of the puzzle, traverse the map with", "two of the puzzle - try the 5 given slopes and spit out", "from pathlib import Path import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if", "by <x_step,y_step> and checks if a tree is hit returns: the total number", "tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ == \"__main__\": # Load the puzzle", "int \"\"\" trees_hit = 0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step", "given slopes and spit out the total product slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]] trees_hit_per_slope =", "until reaching the bottom of the map. 
every iteration advances position by <x_step,y_step>", "puzzle import to a map input_file = Path(__file__).resolve().parents[2] / \"inputs\" / \"day3.txt\" with", "np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in the x-cord of", "for line in fii] # Strip newline characters # Part one of the", "of the map. every iteration advances position by <x_step,y_step> and checks if a", "x-cord of the map line, looping if x is > len(map_line) returns: True", "print the results for part 2 print() # print a newline for slope,hit_count", "is in the path, False otherwise rtype: Bool \"\"\" offset = map_x_coord %", "pathlib import Path import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a", "hit rtype: int \"\"\" trees_hit = 0 map_depth = len(map) y_steps = range(0,map_depth,y_step)", "for part 2 print() # print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope", "map line, looping if x is > len(map_line) returns: True if a tree", "reaching the bottom of the map. every iteration advances position by <x_step,y_step> and", "rollover return map_line[offset]=='#' def traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\" (array", "for j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0 return trees_hit", "over a \"map\" (array of strings) starting at the top left until reaching", "the top left until reaching the bottom of the map. 
every iteration advances", "if x is > len(map_line) returns: True if a tree is in the", "len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step)", "slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees hit\") print(f\"Part Two", "as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in the x-cord", "<x_step,y_step> and checks if a tree is hit returns: the total number of", "in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees hit\") print(f\"Part Two Solution:", "tree is in the path, False otherwise rtype: Bool \"\"\" offset = map_x_coord", "open(input_file) as fii: map = [line.rstrip('\\n') for line in fii] # Strip newline", "= map_x_coord % len(map_line) # module operater for rollover return map_line[offset]=='#' def traverse_map(map,", "newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results in {hit_count} trees hit\")", "print() # print a newline for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope): print(f\"Slope of {slope} results", "/ \"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n') for line in fii]", "3-1 slope and count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part", "\"__main__\": # Load the puzzle import to a map input_file = Path(__file__).resolve().parents[2] /", "with open(input_file) as fii: map = [line.rstrip('\\n') for line in fii] # Strip", "for slope in slopes_to_test] product_of_trees = np.prod(trees_hit_per_slope) # print the results for part", "# print the results for part 2 print() # print a newline for", "Trees hit rtype: int \"\"\" trees_hit = 0 map_depth = len(map) y_steps =", "print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of the puzzle - try the", "in enumerate(y_steps): trees_hit += 1 if 
tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__", "if tree_in_path(map[step],j*x_step) else 0 return trees_hit if __name__ == \"__main__\": # Load the", "import numpy as np def tree_in_path(map_line,map_x_coord): \"\"\" Checks if a tree is in", "tree is hit returns: the total number of Trees hit rtype: int \"\"\"", "__name__ == \"__main__\": # Load the puzzle import to a map input_file =", "# Strip newline characters # Part one of the puzzle, traverse the map", "> len(map_line) returns: True if a tree is in the path, False otherwise", "# encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\") # part two of the puzzle -", "False otherwise rtype: Bool \"\"\" offset = map_x_coord % len(map_line) # module operater", "map with a 3-1 slope and count trees # encountered print(f\"Part One Solution:", "traverse_map(map, x_step, y_step): \"\"\" iterates over a \"map\" (array of strings) starting at", "starting at the top left until reaching the bottom of the map. every", "\"\"\" trees_hit = 0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step in", "checks if a tree is hit returns: the total number of Trees hit", "a tree is hit returns: the total number of Trees hit rtype: int", "\"day3.txt\" with open(input_file) as fii: map = [line.rstrip('\\n') for line in fii] #", "range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0 return", "[line.rstrip('\\n') for line in fii] # Strip newline characters # Part one of", "characters # Part one of the puzzle, traverse the map with a 3-1", "is hit returns: the total number of Trees hit rtype: int \"\"\" trees_hit", "with a 3-1 slope and count trees # encountered print(f\"Part One Solution: {traverse_map(map,3,1)}\")", "a tree is in the x-cord of the map line, looping if x", "= 0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps): trees_hit", "the bottom of the map. 
every iteration advances position by <x_step,y_step> and checks", "trees_hit = 0 map_depth = len(map) y_steps = range(0,map_depth,y_step) for j,step in enumerate(y_steps):", "True if a tree is in the path, False otherwise rtype: Bool \"\"\"", "offset = map_x_coord % len(map_line) # module operater for rollover return map_line[offset]=='#' def" ]
[ ": '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址", ": '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博',", "'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code, 'submit' : u'登录' } newhtml", "vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId", "imgsrc ### 验证码 code = raw_input(\"plz input code:\") print action print password print", "password print vk new_url = url_login + action data = { 'mobile' :", "'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]')", "= selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId =", "'', 'vk' : vk, 'capId':capId, 'code':code, 'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content", "vk, 'capId':capId, 'code':code, 'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml)", ": 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId,", "etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0]", "lxml import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content", "index = imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc ### 验证码 code", "#coding=utf-8 import 
requests from lxml import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login =", "+ 4:] print imgsrc ### 验证码 code = raw_input(\"plz input code:\") print action", "'capId':capId, 'code':code, 'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content", "#此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code, 'submit'", "requests.get(url_login).content selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0]", "etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content: text = each.xpath('string(.)') print text", "requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content: text =", "password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index", ": u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code, 'submit' : u'登录'", "= requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content: text", "'<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle'", "'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk' : vk,", ": u'登录' } newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for", "html = requests.get(url_login).content selector = etree.HTML(html) password = 
selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action", "= imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc ### 验证码 code =", "4:] print imgsrc ### 验证码 code = raw_input(\"plz input code:\") print action print", "newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content:", "import requests from lxml import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/'", "验证码 code = raw_input(\"plz input code:\") print action print password print vk new_url", "imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc ### 验证码 code = raw_input(\"plz", "data = { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on',", "new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content: text = each.xpath('string(.)')", "'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '',", "= selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc ###", "+ action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember'", "print action print password print vk new_url = url_login + action data =", "action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' :", "= { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL'", "action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index +", "imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc", "selector.xpath('//input[@name=\"vk\"]/@value')[0] action = 
selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index", "'<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount'", "selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=')", "capId = imgsrc[index + 4:] print imgsrc ### 验证码 code = raw_input(\"plz input", "= 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password", "= selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index + 4:]", "= imgsrc[index + 4:] print imgsrc ### 验证码 code = raw_input(\"plz input code:\")", "= etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in content: text = each.xpath('string(.)') print", "print vk new_url = url_login + action data = { 'mobile' : '<EMAIL>',#你的微博帐号", "#此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0]", "{ 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' :", "vk new_url = url_login + action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password", "= 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk =", "url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password = 
selector.xpath('//input[@type=\"password\"]/@name')[0] vk", "= selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index =", "selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc", "code = raw_input(\"plz input code:\") print action print password print vk new_url =", "imgsrc[index + 4:] print imgsrc ### 验证码 code = raw_input(\"plz input code:\") print", "selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index + 4:] print", "} newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each in", "action print password print vk new_url = url_login + action data = {", "#你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' :", "= requests.get(url_login).content selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action =", "print imgsrc ### 验证码 code = raw_input(\"plz input code:\") print action print password", "print password print vk new_url = url_login + action data = { 'mobile'", "'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0]", "'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code,", "'code':code, 'submit' : u'登录' } newhtml = 
requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content =", "url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html)", "from lxml import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html =", "= etree.HTML(html) password = selector.xpath('//input[@type=\"password\"]/@name')[0] vk = selector.xpath('//input[@name=\"vk\"]/@value')[0] action = selector.xpath('//form[@method=\"post\"]/@action')[0] imgsrc =", ": 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk'", "etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector =", "### 验证码 code = raw_input(\"plz input code:\") print action print password print vk", "selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0] index = imgsrc.find('cpt=') capId = imgsrc[index + 4:] print imgsrc ### 验证码", "code:\") print action print password print vk new_url = url_login + action data", "= raw_input(\"plz input code:\") print action print password print vk new_url = url_login", "import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector", "'backTitle' : u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code, 'submit' :", "password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' :", "input code:\") print action print password print vk new_url = url_login + action", "new_url = url_login + action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password :", "u'微博', 'tryCount' : '', 'vk' : vk, 'capId':capId, 'code':code, 'submit' : u'登录' }", "'on', 'backURL' : 'https://weibo.cn/fishli28', #此处请填写微博地址 'backTitle' : u'微博', 'tryCount' : '', 'vk' :", ": '', 'vk' : vk, 'capId':capId, 
'code':code, 'submit' : u'登录' } newhtml =", "'vk' : vk, 'capId':capId, 'code':code, 'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content new_selector", "'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码 'remember' : 'on', 'backURL' : 'https://weibo.cn/fishli28',", ": vk, 'capId':capId, 'code':code, 'submit' : u'登录' } newhtml = requests.post(new_url,data=data).content new_selector =", "u'登录' } newhtml = requests.post(new_url,data=data).content new_selector = etree.HTML(newhtml) content = new_selector.xpath('//span[@class=\"ctt\"]') for each", "= url_login + action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>',", "'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html = requests.get(url_login).content selector = etree.HTML(html) password =", "raw_input(\"plz input code:\") print action print password print vk new_url = url_login +", "url_login + action data = { 'mobile' : '<EMAIL>',#你的微博帐号 password : '<PASSWORD>', #你的微博密码", "requests from lxml import etree url = 'http://weibo.cn/fishli28' #此处请修改为微博地址 url_login = 'https://login.weibo.cn/login/' html" ]
[ "plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'],", "df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance')", "np import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization", "import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of", "else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if", "plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs.", "plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if plot_param[1]: # visulalization of grade", "plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation", "plt.title('Elevation vs. 
Distance') plt.show() if plot_param[1]: # visulalization of grade data if general_filter:", "data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered',", "plt.show() if plot_param[1]: # visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'],", "visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance", "elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid()", "df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else:", "matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of elevation", "plt def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of elevation data if", "df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show()", "plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. 
Distance') plt.show() if plot_param[1]: # visulalization", "plot_param[1]: # visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]')", "general_filter, plot_param): if plot_param[0]: # visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'],", "grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered',", "Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance')", "plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs.", "[ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if plot_param[1]: # visulalization of grade data", "if plot_param[0]: # visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft'])", "plt.grid() plt.title('Elevation vs. 
Distance') plt.show() if plot_param[1]: # visulalization of grade data if", "numpy as np import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if plot_param[0]:", "<reponame>NREL/gradeit<gh_stars>0 import numpy as np import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param):", "df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show()", "df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'],", "plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation", "'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid()", "'unfiltered']) plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade", "plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid()", "plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. 
Distance') plt.show() if not plot_param[0] and not plot_param[0]:", "Distance') plt.show() if plot_param[1]: # visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'],", "plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'],", "if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered'])", "visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]')", "plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance') plt.show() if", "plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if plot_param[1]:", "[ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]')", "[ft]') plt.grid() plt.title('Grade vs. 
Distance') plt.show() if not plot_param[0] and not plot_param[0]: print('No", "general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs.", "[ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft'])", "vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs.", "plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance') plt.show() if not plot_param[0]", "plt.title('Grade vs. Distance') plt.show() if not plot_param[0] and not plot_param[0]: print('No visualization selected.')", "plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show()", "else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance') plt.show() if not", "plot_param[0]: # visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation", "vs. 
Distance') plt.show() if plot_param[1]: # visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'],", "df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show() else:", "plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance", "plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]')", "if plot_param[1]: # visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered'])", "df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if plot_param[1]: #", "plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. 
Distance') plt.show() if not plot_param[0] and not", "data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered'])", "of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]')", "if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade", "[ft]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Elevation vs. Distance') plt.show() if plot_param[1]: # visulalization of", "general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Elevation", "df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance') plt.show() if not plot_param[0] and", "plt.legend(['filtered', 'unfiltered']) plt.title('Elevation vs. Distance') plt.show() else: plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]') plt.xlabel('Distance [ft]')", "plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. 
Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered'])", "plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance')", "Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance') plt.show()", "as plt def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of elevation data", "def plot_data(df, general_filter, plot_param): if plot_param[0]: # visualization of elevation data if general_filter:", "plot_param): if plot_param[0]: # visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'],", "plt.grid() plt.title('Grade vs. Distance') plt.show() if not plot_param[0] and not plot_param[0]: print('No visualization", "[ft]') plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance", "# visualization of elevation data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'], df['cumulative_original_distance_ft'], df['elevation_ft']) plt.ylabel('Elevation [ft]')", "# visulalization of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance", "plt.grid() plt.legend(['filtered', 'unfiltered']) plt.title('Grade vs. 
Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]')", "as np import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if plot_param[0]: #", "vs. Distance') plt.show() else: plt.plot(df_filtered['cumulative_uniform_distance_ft'], df_filtered['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid() plt.title('Grade vs. Distance')", "of grade data if general_filter: plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'], df['cumulative_original_distance_ft'], df['grade_dec_unfiltered']) plt.ylabel('Grade]') plt.xlabel('Distance [ft]') plt.grid()", "import numpy as np import matplotlib.pyplot as plt def plot_data(df, general_filter, plot_param): if" ]
[ "github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags] #", "tags, the github tags contains gitlab version for gitlab_version in gitlab_versions: if gitlab_version", "match the gitlab version which not inside GitHub tags, the github tags contains", "gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags]", "github_tags] # match the gitlab version which not inside GitHub tags, the github", "github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags] # match the", "inside GitHub tags, the github tags contains gitlab version for gitlab_version in gitlab_versions:", "# match the gitlab version which not inside GitHub tags, the github tags", "the gitlab version which not inside GitHub tags, the github tags contains gitlab", "github = Github() gitlab = Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions", "and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag) if __name__ ==", "import Gitlab from utils.shell_executor.executor import execute_now def main(): github = Github() gitlab =", "utils.shell_executor.executor import execute_now def main(): github = Github() gitlab = Gitlab() new_tags =", "github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags] # match the gitlab", "[t.replace('v', '').replace('.m1', '') for t in github_tags] # match the gitlab version which", "= gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t in", "from selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def main(): github = Github()", "'').replace('.m1', '') for t in github_tags] 
# match the gitlab version which not", "t in github_tags] # match the gitlab version which not inside GitHub tags,", "in github_tags] # match the gitlab version which not inside GitHub tags, the", "for t in github_tags] # match the gitlab version which not inside GitHub", "tags contains gitlab version for gitlab_version in gitlab_versions: if gitlab_version not in github_tags", "int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag) if __name__ == \"__main__\":", "in gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for", "new_tags = list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags", "Github() gitlab = Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions()", "version which not inside GitHub tags, the github tags contains gitlab version for", "github tags contains gitlab version for gitlab_version in gitlab_versions: if gitlab_version not in", "selenium_controller.github import Github from selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def main():", "fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '')", "not inside GitHub tags, the github tags contains gitlab version for gitlab_version in", "version for gitlab_version in gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) >", "in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag) if", "selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def main(): github = 
Github() gitlab", "the github tags contains gitlab version for gitlab_version in gitlab_versions: if gitlab_version not", "execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1',", "gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag", "Gitlab from utils.shell_executor.executor import execute_now def main(): github = Github() gitlab = Gitlab()", "not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag)", "> 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag) if __name__ == \"__main__\": main()", "Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions()", "GitHub tags, the github tags contains gitlab version for gitlab_version in gitlab_versions: if", "list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v',", "gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t", "for gitlab_version in gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12:", "gitlab = Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags", "= Github() gitlab = Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions =", "--all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags = [t.replace('v', 
'').replace('.m1', '') for", "gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags:", "def main(): github = Github() gitlab = Gitlab() new_tags = list() execute_now('git fetch", "= github.fetch_github_available_docker_versions() github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags] # match", "gitlab version which not inside GitHub tags, the github tags contains gitlab version", "gitlab version for gitlab_version in gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0])", "import Github from selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def main(): github", "main(): github = Github() gitlab = Gitlab() new_tags = list() execute_now('git fetch --all')", "github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in new_tags: github.create_new_branch(tag) if __name__", "= list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags = github.fetch_github_available_docker_versions() github_tags =", "which not inside GitHub tags, the github tags contains gitlab version for gitlab_version", "contains gitlab version for gitlab_version in gitlab_versions: if gitlab_version not in github_tags and", "Github from selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def main(): github =", "from utils.shell_executor.executor import execute_now def main(): github = Github() gitlab = Gitlab() new_tags", "= Gitlab() new_tags = list() execute_now('git fetch --all') gitlab_versions = gitlab.fetch_gitlab_map_versions() github_tags =", "'') for t in github_tags] # match the gitlab version which not inside", "if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version) for tag in", "= [t.replace('v', '').replace('.m1', '') for t in 
github_tags] # match the gitlab version", "from selenium_controller.github import Github from selenium_controller.gitlab import Gitlab from utils.shell_executor.executor import execute_now def", "gitlab_version in gitlab_versions: if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12: new_tags.append(gitlab_version)", "execute_now def main(): github = Github() gitlab = Gitlab() new_tags = list() execute_now('git", "import execute_now def main(): github = Github() gitlab = Gitlab() new_tags = list()" ]
[ "\"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" } ] }", "username=username, password=password) def create_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\":", "tags: print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key,", "tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id = \"b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8\" create_server_tags(server_id) get_server_tags(server_id) delete_server_tags(server_id)", "}, { \"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id):", "tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if __name__ == \"__main__\":", "# create connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" #", "= { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id,", "[ { \"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id):", "\"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url =", "-*-coding:utf-8 -*- from openstack import connection # create connection username = \"xxxxxx\" password", "\"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id):", "= \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def", "connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = { \"tags\": [ 
{", "\"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag", "} conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\",", "\"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn =", "= \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn", "print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values)", "\"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def", "project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\",", "\"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data)", "\"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data =", "tag in tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id = \"b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8\" create_server_tags(server_id)", "= conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags()", "= \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url", "user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = { \"tags\": [ { \"key\":", "\"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id,", "create connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # 
tenant", "userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url", "= conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id", "[ { \"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" }", "conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id =", "**data) def delete_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\"", "\"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\"", "= \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user", "\"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags =", "\"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\": [", "{ \"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\": \"value3\" } ]", "\"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id)", "projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID", "url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = {", "password=password) def create_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\"", "\"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def", "password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = 
\"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" #", "data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" } ] }", "{ \"key\": \"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data", "def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if __name__", "ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username,", "conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = { \"tags\":", "data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }, { \"key\":", "for tag in tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id = \"b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8\"", "ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint", "print(tag.key, tag.values) if __name__ == \"__main__\": server_id = \"b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8\" create_server_tags(server_id) get_server_tags(server_id) delete_server_tags(server_id) get_project_tags()", "tag in tags: print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in", "tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if", "# -*-coding:utf-8 -*- from openstack import connection # create connection username = \"xxxxxx\"", "get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def get_project_tags(): tags", "def delete_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }", "} ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\": [ {", 
"connection # create connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\"", "} ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in", "import connection # create connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId =", "\"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\"", "for tag in tags: print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag", "openstack import connection # create connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId", "\"key2\", \"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = {", "def create_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" },", "} conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key,", "delete_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" } ]", "**data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def", "# user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url,", "\"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account", "conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value)", "= \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId =", "conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def 
get_project_tags(): tags = conn.ecs.get_project_tags() for", "{ \"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags", "-*- from openstack import connection # create connection username = \"xxxxxx\" password =", "\"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for", "from openstack import connection # create connection username = \"xxxxxx\" password = \"<PASSWORD>\"", "in tags: print(tag.key, tag.value) def get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags:", "tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" #", "{ \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" } ] } conn.ecs.delete_server_tags(server_id, **data)", "endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data =", "# endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data", "# tenant ID userDomainId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\"", "conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\":", "connection username = \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID", "] } conn.ecs.delete_server_tags(server_id, **data) def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags:", "= connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password) def create_server_tags(server_id): data = { 
\"tags\": [", "def get_server_tags(server_id): tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def get_project_tags():", "] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\": [ { \"key\":", "tags = conn.ecs.get_server_tags(server_id) for tag in tags: print(tag.key, tag.value) def get_project_tags(): tags =", "create_server_tags(server_id): data = { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }, {", "{ \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\", \"value\":", "get_project_tags(): tags = conn.ecs.get_project_tags() for tag in tags: print(tag.key, tag.values) if __name__ ==", "\"value\": \"value3\" } ] } conn.ecs.create_server_tags(server_id, **data) def delete_server_tags(server_id): data = { \"tags\":", "username = \"xxxxxx\" password = \"<PASSWORD>\" projectId = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # tenant ID userDomainId", "user account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId,", "in tags: print(tag.key, tag.values) if __name__ == \"__main__\": server_id = \"b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8\" create_server_tags(server_id) get_server_tags(server_id)", "= { \"tags\": [ { \"key\": \"key1\", \"value\": \"value1\" }, { \"key\": \"key2\",", "auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId, username=username, password=password)", "account ID auth_url = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # endpoint url conn = connection.Connection(auth_url=auth_url, user_domain_id=userDomainId, project_id=projectId," ]
[ "def pr_body() -> str: if target_branch() == \"staging\": return 'To merge into the", "def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def", "import json from pathlib import Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name()", "as content: package = json.load(content) return package[\"version\"] def pr_body() -> str: if target_branch()", "content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content,", "typing import Dict, List, Union import json from pathlib import Path import click", "ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\"", "def should_upload_package() -> bool: return git_branch_name() == \"release\" def should_upload_image() -> bool: return", "\"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\":", "run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() ->", "if target_branch() == \"staging\": return 'To merge into the staging branch, please use", "{ \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(),", "@click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\" for key, val in get_env().items():", "print(content, end=\"\") if __name__ == \"__main__\": try: main() except CalledProcessError as err: 
exit(err.stdout", "package[\"version\"] def pr_body() -> str: if target_branch() == \"staging\": return 'To merge into", "def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\"", ") def get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(),", "str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() ->", ").stdout.splitlines() def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :]", "return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str, bool]]:", "is_flag=True) def main(write): content = \"\" for key, val in get_env().items(): if write:", "-> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\":", "please use \"Rebase and merge\", or \"Squash and merge\".' 
elif target_branch == \"release\":", "-> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def", "git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\"", "if __name__ == \"__main__\": try: main() except CalledProcessError as err: exit(err.stdout + err.stderr)", "f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name()", "Dict, List, Union import json from pathlib import Path import click __dir__ =", "Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full :=", "str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def is_dev_branch()", "+= f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\")", "str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str,", "from typing import Dict, List, Union import json from pathlib import Path import", "in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag()", "str: with open(\"package.json\", \"rb\") as content: package = json.load(content) return package[\"version\"] def pr_body()", "\"staging\": return 'To merge into the staging branch, please use \"Rebase and merge\",", "git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(),", "get_env().items(): if write: content += f\"{key}={val}\\n\" else: content += 
f\"{key}={val.__repr__()}\\n\" if write: with", "with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\":", "\"\" def target_branch() -> str: if git_branch_name() == \"staging\": return \"release\" else: return", "target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(),", "if write: content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"],", "\"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha :=", "write: content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\")", "[\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() ->", "f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if", "docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(),", "content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as", "\"Rebase and merge\", or \"Squash and merge\".' elif target_branch == \"release\": return 'To", "merge into the release branch, please use \"Create a merge commit\".' 
return \"\"", "\"\" for key, val in get_env().items(): if write: content += f\"{key}={val}\\n\" else: content", "str: if target_branch() == \"staging\": return 'To merge into the staging branch, please", "import CalledProcessError, run from typing import Dict, List, Union import json from pathlib", "capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return", "in [\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\") as content: package", "fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() -> str: if git_branch_name() == \"staging\":", "please use \"Create a merge commit\".' return \"\" def overwrite_path() -> str: return", "== \"staging\": return 'To merge into the staging branch, please use \"Rebase and", "def get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\":", "from pathlib import Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str:", "should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\",", "else: return \"\" def git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\",", "into the staging branch, please use \"Rebase and merge\", or \"Squash and merge\".'", "should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def", "if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch()", "-> bool: return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return", "git_branch_name() not 
in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes()", "in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return", "elif target_branch == \"release\": return 'To merge into the release branch, please use", "else: return \"\" def is_dev_branch() -> bool: return git_branch_name() not in [\"release\", \"staging\"]", "and merge\".' elif target_branch == \"release\": return 'To merge into the release branch,", "return 'To merge into the release branch, please use \"Create a merge commit\".'", "def is_dev_branch() -> bool: return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed() ->", "if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if __name__", "package = json.load(content) return package[\"version\"] def pr_body() -> str: if target_branch() == \"staging\":", "val in get_env().items(): if write: content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if", "pr_body() -> str: if target_branch() == \"staging\": return 'To merge into the staging", "pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\" for key,", "\"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content =", "end=\"\") if __name__ == \"__main__\": try: main() except CalledProcessError as err: exit(err.stdout +", "\"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str: if fullref", "import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"):", ":= os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def 
git_list_changes() -> List[str]: return run(", "\"release\": return 'To merge into the release branch, please use \"Create a merge", "branch, please use \"Rebase and merge\", or \"Squash and merge\".' elif target_branch ==", "merge into the staging branch, please use \"Rebase and merge\", or \"Squash and", "\"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), }", ").stdout.splitlines()[0] def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else:", "bool: return git_branch_name() == \"release\" def should_upload_image() -> bool: return git_branch_name() in [\"release\",", "the staging branch, please use \"Rebase and merge\", or \"Squash and merge\".' elif", "os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(),", "def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return", "fullsha[:7] else: return \"\" def is_dev_branch() -> bool: return git_branch_name() not in [\"release\",", "\"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\": try: main()", "fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def is_dev_branch() -> bool:", "not in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def", "github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(),", "def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return", "git_branch_name() == \"release\" 
def should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"] def", "Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(),", "open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\": try:", "if git_branch_name() == \"staging\": return \"release\" else: return \"staging\" def git_commit_title() -> str:", "is_dev_branch() -> bool: return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed() -> bool:", "return git_branch_name() == \"release\" def should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"]", "[ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str, bool]]: return {", "\"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha", "] ) def get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\":", "env_file: env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\": try: main() except CalledProcessError", "-> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\"", "overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\"", "merge commit\".' return \"\" def overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"],", "str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool:", "\"Squash and merge\".' 
elif target_branch == \"release\": return 'To merge into the release", "def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() ==", "\"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\":", "\"\" def overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def", "\"--write\", is_flag=True) def main(write): content = \"\" for key, val in get_env().items(): if", "'To merge into the staging branch, please use \"Rebase and merge\", or \"Squash", "git_branch_name() == \"staging\": return \"release\" else: return \"staging\" def git_commit_title() -> str: return", "os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() -> List[str]: return run( [\"git\",", "target_branch() == \"staging\": return 'To merge into the staging branch, please use \"Rebase", "\"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\") as content: package = json.load(content)", "\"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\",", "str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha()", "def git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True,", ":= os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def is_dev_branch() -> bool: return", "-> bool: return git_branch_name() == \"release\" def should_upload_image() -> bool: return git_branch_name() in", "\"PROJECT_NAME\": 
github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\":", "package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content", "os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() -> str: if", "main(write): content = \"\" for key, val in get_env().items(): if write: content +=", "#!/usr/bin/env python3 import os from subprocess import CalledProcessError, run from typing import Dict,", "-> bool: return git_branch_name() in [\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\",", "-> str: with open(\"package.json\", \"rb\") as content: package = json.load(content) return package[\"version\"] def", "== \"staging\": return \"release\" else: return \"staging\" def git_commit_title() -> str: return run(", "return \"\" def target_branch() -> str: if git_branch_name() == \"staging\": return \"release\" else:", "return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return", "release branch, please use \"Create a merge commit\".' return \"\" def overwrite_path() ->", "'To merge into the release branch, please use \"Create a merge commit\".' 
return", "if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() -> List[str]:", "return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() ->", "check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"):", "\"staging\": return \"release\" else: return \"staging\" def git_commit_title() -> str: return run( [\"git\",", "return 'To merge into the staging branch, please use \"Rebase and merge\", or", "python3 import os from subprocess import CalledProcessError, run from typing import Dict, List,", "check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"):", "bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(),", "\"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\":", "[\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if", "git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\"", "with open(\"package.json\", \"rb\") as content: package = json.load(content) return package[\"version\"] def pr_body() ->", "\"\" def is_dev_branch() -> bool: return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed()", "branch, please use \"Create a merge 
commit\".' return \"\" def overwrite_path() -> str:", "repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() -> List[str]: return", "-> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str,", "a merge commit\".' return \"\" def overwrite_path() -> str: return \":\".join( [ str(__dir__),", "ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(),", "else: return \"\" def target_branch() -> str: if git_branch_name() == \"staging\": return \"release\"", "def package_version() -> str: with open(\"package.json\", \"rb\") as content: package = json.load(content) return", "text=True, ).stdout.splitlines() def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\")", "\"staging\" def git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True,", "[\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str:", "the release branch, please use \"Create a merge commit\".' 
return \"\" def overwrite_path()", "def target_branch() -> str: if git_branch_name() == \"staging\": return \"release\" else: return \"staging\"", "get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(),", "\"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\" for", "f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content)", "text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7]", "should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"] def package_version() -> str: with", "} @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\" for key, val", "content: package = json.load(content) return package[\"version\"] def pr_body() -> str: if target_branch() ==", "-> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() ->", "return \"\" def git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"],", "github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def", "-> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines()", "is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(),", "str: if fullref := 
os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def", "\"release\" else: return \"staging\" def git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\",", "run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str:", "target_branch == \"release\": return 'To merge into the release branch, please use \"Create", "\"\"): return fullsha[:7] else: return \"\" def is_dev_branch() -> bool: return git_branch_name() not", "into the release branch, please use \"Create a merge commit\".' return \"\" def", "\"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write):", "fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() ->", "[\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\") as content: package =", "= Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else:", "\"release\" def should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"] def package_version() ->", "commit\".' 
return \"\" def overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ]", "git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True,", "-> str: if git_branch_name() == \"staging\": return \"release\" else: return \"staging\" def git_commit_title()", "docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package()", "\"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\":", "def main(write): content = \"\" for key, val in get_env().items(): if write: content", "\"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str:", "def overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env()", ":] else: return \"\" def target_branch() -> str: if git_branch_name() == \"staging\": return", "-> str: if target_branch() == \"staging\": return 'To merge into the staging branch,", "repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\",", "git_branch_name() in [\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\") as content:", "content = \"\" for key, val in get_env().items(): if write: content += f\"{key}={val}\\n\"", "key, val in get_env().items(): if write: content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\"", "__dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1]", 
"os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def is_dev_branch() -> bool: return git_branch_name()", "else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else:", "import Dict, List, Union import json from pathlib import Path import click __dir__", "or \"Squash and merge\".' elif target_branch == \"release\": return 'To merge into the", "\"TARGET_BRANCH\": target_branch(), \"COMMIT_TITLE\": git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\":", "+= f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file:", "else: print(content, end=\"\") if __name__ == \"__main__\": try: main() except CalledProcessError as err:", "import os from subprocess import CalledProcessError, run from typing import Dict, List, Union", "bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name()", "-> bool: return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def", "== \"release\": return 'To merge into the release branch, please use \"Create a", "\"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() -> str: if git_branch_name()", "\"\" def git_list_changes() -> List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True,", "return package[\"version\"] def pr_body() -> str: if target_branch() == \"staging\": return 'To merge", "return \"\" def is_dev_branch() -> bool: return git_branch_name() not in [\"release\", \"staging\"] def", "should_upload_package() -> bool: return git_branch_name() == \"release\" def 
should_upload_image() -> bool: return git_branch_name()", "= \"\" for key, val in get_env().items(): if write: content += f\"{key}={val}\\n\" else:", "return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\" in", "pathlib import Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if", "return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name()", "str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str, bool]]: return { \"PROJECT_NAME\":", "package_version() -> str: with open(\"package.json\", \"rb\") as content: package = json.load(content) return package[\"version\"]", "use \"Rebase and merge\", or \"Squash and merge\".' elif target_branch == \"release\": return", "else: return \"staging\" def git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"],", "\":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() -> Dict[str, Union[str, bool]]: return", "os from subprocess import CalledProcessError, run from typing import Dict, List, Union import", "return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes() -> List[str]: return run( [\"git\", \"log\",", "== \"release\" def should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"] def package_version()", "bool: return git_branch_name() in [\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\")", "merge\", or \"Squash and merge\".' 
elif target_branch == \"release\": return 'To merge into", "-> str: if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def", "r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0] def git_short_sha() -> str: if fullsha := os.environ.get(\"GITHUB_SHA\",", "\"rb\") as content: package = json.load(content) return package[\"version\"] def pr_body() -> str: if", "@click.command() @click.option(\"-w\", \"--write\", is_flag=True) def main(write): content = \"\" for key, val in", "def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else:", "str: if git_branch_name() == \"staging\": return \"release\" else: return \"staging\" def git_commit_title() ->", "CalledProcessError, run from typing import Dict, List, Union import json from pathlib import", "import Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full", "return \"staging\" def git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True,", "\"Create a merge commit\".' 
return \"\" def overwrite_path() -> str: return \":\".join( [", "target_branch() -> str: if git_branch_name() == \"staging\": return \"release\" else: return \"staging\" def", "= json.load(content) return package[\"version\"] def pr_body() -> str: if target_branch() == \"staging\": return", "return \"\" def overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] )", "from subprocess import CalledProcessError, run from typing import Dict, List, Union import json", "click __dir__ = Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return", "\"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str: if", "return \"release\" else: return \"staging\" def git_commit_title() -> str: return run( [\"git\", \"log\",", "\"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\", \"--write\", is_flag=True)", "for key, val in get_env().items(): if write: content += f\"{key}={val}\\n\" else: content +=", "merge\".' elif target_branch == \"release\": return 'To merge into the release branch, please", "def should_upload_image() -> bool: return git_branch_name() in [\"release\", \"staging\"] def package_version() -> str:", "return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\": git_branch_name(), \"TARGET_BRANCH\":", "write: with open(os.environ[\"GITHUB_ENV\"], \"a\") as env_file: env_file.write(content) else: print(content, end=\"\") if __name__ ==", "staging branch, please use \"Rebase and merge\", or \"Squash and merge\".' 
elif target_branch", "List[str]: return run( [\"git\", \"log\", \"-1\", \"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, ).stdout.splitlines() def", "-> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return \"\" def git_list_changes()", "git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True, ).stdout.splitlines()[0]", "return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() -> str: if git_branch_name() ==", ":= os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return \"\" def target_branch() -> str:", "if fullsha := os.environ.get(\"GITHUB_SHA\", \"\"): return fullsha[:7] else: return \"\" def is_dev_branch() ->", "-> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() == \"release\" def", "bool: return git_branch_name() not in [\"release\", \"staging\"] def ci_yaml_changed() -> bool: return \".github/workflows/ci.yml\"", "List, Union import json from pathlib import Path import click __dir__ = Path(__file__).parent.absolute()", "\".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() -> str:", "overwrite_path() -> str: return \":\".join( [ str(__dir__), os.environ[\"PATH\"], ] ) def get_env() ->", "git_commit_title(), \"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command()", "Path(__file__).parent.absolute() def github_repo_name() -> str: if repo_full := os.environ.get(\"GITHUB_REPOSITORY\"): return repo_full.split(\"/\")[1] else: return", "\"--name-only\", \"--pretty=\"], check=True, capture_output=True, text=True, 
).stdout.splitlines() def git_branch_name() -> str: if fullref :=", "return \".github/workflows/ci.yml\" in git_list_changes() def docker_tag() -> str: return f\"{git_branch_name()}-{git_short_sha()}\" def docker_stack_name() ->", "return git_branch_name() in [\"release\", \"staging\"] def package_version() -> str: with open(\"package.json\", \"rb\") as", "Union import json from pathlib import Path import click __dir__ = Path(__file__).parent.absolute() def", "env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\": try: main() except CalledProcessError as", "return fullsha[:7] else: return \"\" def is_dev_branch() -> bool: return git_branch_name() not in", "git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return fullref[len(\"refs/heads/\") :] else: return", "and merge\", or \"Squash and merge\".' elif target_branch == \"release\": return 'To merge", "return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() == \"release\" def should_upload_image() ->", "in get_env().items(): if write: content += f\"{key}={val}\\n\" else: content += f\"{key}={val.__repr__()}\\n\" if write:", "use \"Create a merge commit\".' 
return \"\" def overwrite_path() -> str: return \":\".join(", "json.load(content) return package[\"version\"] def pr_body() -> str: if target_branch() == \"staging\": return 'To", "f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() == \"release\" def should_upload_image() -> bool:", "\"SHOULD_UPLOAD_PACKAGE\": should_upload_package(), \"SHOULD_UPLOAD_IMAGE\": should_upload_image(), \"PACKAGE_VERSION\": package_version(), \"PATH\": overwrite_path(), \"PR_BODY\": pr_body(), } @click.command() @click.option(\"-w\",", "open(\"package.json\", \"rb\") as content: package = json.load(content) return package[\"version\"] def pr_body() -> str:", "as env_file: env_file.write(content) else: print(content, end=\"\") if __name__ == \"__main__\": try: main() except", "json from pathlib import Path import click __dir__ = Path(__file__).parent.absolute() def github_repo_name() ->", "docker_stack_name() -> str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() == \"release\"", "Union[str, bool]]: return { \"PROJECT_NAME\": github_repo_name(), \"DOCKER_TAG\": docker_tag(), \"CI_YAML_CHANGED\": ci_yaml_changed(), \"IS_DEV_BRANCH\": is_dev_branch(), \"BRANCH_NAME\":", "capture_output=True, text=True, ).stdout.splitlines() def git_branch_name() -> str: if fullref := os.environ.get(\"GITHUB_REF\", \"\"): return", "subprocess import CalledProcessError, run from typing import Dict, List, Union import json from", "def git_commit_title() -> str: return run( [\"git\", \"log\", \"-1\", r\"--pretty=format:%s\"], check=True, capture_output=True, text=True,", "run from typing import Dict, List, Union import json from pathlib import Path", "str: return f\"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}\" def should_upload_package() -> bool: return git_branch_name() == \"release\" def should_upload_image()" ]
[ "{'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data", "class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val)", "] class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz',", "expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1", "struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected", "Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields =", "expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25)", "] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8),", "self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 << 30) | 9999) << 33)", "in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo',", "val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), Field('bar',", "30) | 123456) << 33) | 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f',", "= self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1 << 30) | 123456) <<", "'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 << 30)", "1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb,", "{'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): 
test_colors = [ ((255, 0, 0), (", "unittest from licht.base import LightColor from licht.utils import RESERVED, Bitfield, Field, FieldType class", "setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), Field('bar', 6 * 8,", "for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in", "FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield):", "self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'})", "= [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32,", "struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz': 123123,", "30) | 9999) << 33) | 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f',", "self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int),", "ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0), ( 0, 1.0, 1.0)), ((255, 255,", "b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654,", "self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0,", "(( 0, 255, 255), (180, 1.0, 1.0)), (( 0, 0, 255), (240, 1.0,", "expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1 <<", "val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [", "Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12,", "fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz',", "expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little',", "((255, 0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb in", "from licht.base import LightColor from licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase):", "test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1 << 30) |", "expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value =", "1.0)), (( 0, 255, 0), (120, 1.0, 1.0)), (( 0, 255, 255), (180,", "0), (120, 1.0, 1.0)), (( 0, 255, 255), (180, 1.0, 1.0)), (( 0,", "import struct import unittest from licht.base import LightColor from licht.utils import RESERVED, Bitfield,", "= self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) 
self.assertFieldsEqual(f, {'foo':", "<reponame>MoritzS/licht<gh_stars>1-10 #!/usr/bin/env python import struct import unittest from licht.base import LightColor from licht.utils", "test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f =", "def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb) if __name__ == '__main__':", "0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb in self.test_colors:", "3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected =", "64, FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30,", "= {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 =", "Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield", "test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data)", "255), (180, 1.0, 1.0)), (( 0, 0, 255), (240, 1.0, 1.0)), ((255, 0,", "f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f,", "expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def", "= self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def", "1.0, 1.0)), (( 0, 255, 255), (180, 1.0, 1.0)), (( 0, 0, 255),", "self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) 
self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3})", "'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab'", "class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar',", "30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields =", "struct import unittest from licht.base import LightColor from licht.utils import RESERVED, Bitfield, Field,", "ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3,", "ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') +", "python import struct import unittest from licht.base import LightColor from licht.utils import RESERVED,", "1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class", "self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq',", "FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool),", "| 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected", "0, 255), (240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ] def", "f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected)", "6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields = [", "rgb, hsb in self.test_colors: 
self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors:", "f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456,", "0, 0, 255), (240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ]", "= (((1 << 30) | 9999) << 33) | 123123 value = val1.to_bytes(8,", "3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield", "Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields =", "= ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected", "#!/usr/bin/env python import struct import unittest from licht.base import LightColor from licht.utils import", "value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo':", "255, 0), ( 60, 1.0, 1.0)), (( 0, 255, 0), (120, 1.0, 1.0)),", "1.0)), (( 0, 0, 255), (240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0,", "self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb) if", "signed=True) + b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234,", "baz=987654, fiz=1.55) expected = (((1 << 30) | 123456) << 33) | 987654", "expected = (((1 << 30) | 123456) << 33) | 987654 expected =", "((255, 255, 0), ( 60, 1.0, 1.0)), (( 0, 255, 0), (120, 1.0,", "1.0, 1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)), (( 0, 255, 0),", "= b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): 
test_colors", "FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield", "b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data =", "f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125}", "(180, 1.0, 1.0)), (( 0, 0, 255), (240, 1.0, 1.0)), ((255, 0, 255),", "self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1", "def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f =", "val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar':", "33) | 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def", "f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [", "+ b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456,", "Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield):", "Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield", "= expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2,", "field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class", "self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456,", "BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod", "expected) def test_from_bytes_full(self): val1 = (((1 << 30) | 9999) << 33) |", "def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb,", "FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED,", "= b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f", "b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 << 30) |", "b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f =", "Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield 
cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield", "= FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234,", "8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo',", "bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar':", "FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo',", "(-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected =", "255, 0), (120, 1.0, 1.0)), (( 0, 255, 255), (180, 1.0, 1.0)), ((", "expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f", "= self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3)", "fields = [ Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64,", "def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f", "Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val in", "fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint),", "= [ Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float),", "@classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), 
Field('bar', 6", "255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb),", "expected = (1234).to_bytes(2, 'little') + b'hello!' + struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self):", "[ ((255, 0, 0), ( 0, 1.0, 1.0)), ((255, 255, 0), ( 60,", "= (((1 << 30) | 123456) << 33) | 987654 expected = expected.to_bytes(8,", "for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields", "{'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f", "+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55)", "def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!'", "self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0),", "LightColor from licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field,", "= self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo':", "{'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1", "FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield =", "self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected)", "self.FullBitfield(foo=True, bar=123456, baz=987654, 
fiz=1.55) expected = (((1 << 30) | 123456) << 33)", "= {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self):", "fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes),", "def test_from_bytes_full(self): val1 = (((1 << 30) | 9999) << 33) | 123123", "<< 30) | 123456) << 33) | 987654 expected = expected.to_bytes(8, 'little') +", "((255, 0, 0), ( 0, 1.0, 1.0)), ((255, 255, 0), ( 60, 1.0,", "= [ ((255, 0, 0), ( 0, 1.0, 1.0)), ((255, 255, 0), (", "1.0, 1.0)), (( 0, 0, 255), (240, 1.0, 1.0)), ((255, 0, 255), (300,", "'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 << 30) | 9999)", "3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0), ( 0, 1.0, 1.0)),", "= val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True,", "b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar',", "5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 << 30) | 9999) <<", "0), ( 60, 1.0, 1.0)), (( 0, 255, 0), (120, 1.0, 1.0)), ((", "baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def", "ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16,", "123456) << 33) | 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(),", "6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data =", "self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb) if __name__", "[ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class", "from licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict):", "33) | 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value)", "FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED,", "[ Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ]", "Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ]", "in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb)", "33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16),", "licht.base import LightColor from licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def", "= (1234).to_bytes(2, 'little') + b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f", "cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14)", "self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' + struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(),", "] def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for", "bar=123456, baz=987654, fiz=1.55) expected = (((1 << 30) | 123456) << 33) |", "+ b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar':", "FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED,", "+ struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz':", "(120, 1.0, 1.0)), (( 0, 255, 255), (180, 1.0, 1.0)), (( 0, 0,", "0, 0), ( 0, 1.0, 1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)),", "4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield", "SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f", "val1 = (((1 << 30) | 9999) << 33) | 123123 value =", "field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield):", "import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key,", "Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar',", "data = 
b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase):", "<< 33) | 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f =", "FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint),", "987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value", "| 123456) << 33) | 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55)", "def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), Field('bar', 6 *", "struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25}", "16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield):", "test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data)", "Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield =", "0, 255, 0), (120, 1.0, 1.0)), (( 0, 255, 255), (180, 1.0, 1.0)),", "'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(),", "b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class", "Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED,", "(1234).to_bytes(2, 'little') + b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f =", "Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items():", "'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0), ( 0, 1.0,", "(300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb)", "import LightColor from licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self,", "9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa')", "32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes),", "123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa')", "123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected =", "[ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield", "(((1 << 30) | 123456) << 33) | 987654 expected = expected.to_bytes(8, 'little')", "def test_to_bytes_full(self): f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1 << 30)", "1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb", "class SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes),", "def assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def", "FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield 
cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!',", "1.0, 1.0)), (( 0, 255, 0), (120, 1.0, 1.0)), (( 0, 255, 255),", "struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar'", "test_colors = [ ((255, 0, 0), ( 0, 1.0, 1.0)), ((255, 255, 0),", "5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f,", "'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True)", "(240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self): for", "field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields = [ Field('foo', 16,", "cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f =", "'little') + struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999,", "16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields", "test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb", "| 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self):", "( 0, 1.0, 1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)), (( 0,", "1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' +", "import unittest from licht.base import LightColor from licht.utils import RESERVED, Bitfield, 
Field, FieldType", "1.0)), (( 0, 255, 255), (180, 1.0, 1.0)), (( 0, 0, 255), (240,", "cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def", "255), (240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0)), ] def test_from_rgb(self):", "<< 30) | 9999) << 33) | 123123 value = val1.to_bytes(8, 'little') +", "b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def", "class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar',", "= (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected", "5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield =", "self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d',", "+ struct.pack('<f', 6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz':", "] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5),", "= ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little')", "3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0), ( 0,", "RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val", "255, 255), (180, 1.0, 1.0)), (( 0, 0, 255), (240, 1.0, 1.0)), ((255,", "1.0)), ] def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self):", 
"6.125) f = self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz':", "data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self):", "FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint),", "0, 255, 255), (180, 1.0, 1.0)), (( 0, 0, 255), (240, 1.0, 1.0)),", "(( 0, 255, 0), (120, 1.0, 1.0)), (( 0, 255, 255), (180, 1.0,", "'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq',", "value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value)", "self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(),", "= SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self):", "= [ Field(RESERVED, 16), Field('foo', 16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ]", "f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f,", "8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo', 1,", "bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' 
+ struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected)", "( 60, 1.0, 1.0)), (( 0, 255, 0), (120, 1.0, 1.0)), (( 0,", "self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa'", "fiz=1.55) expected = (((1 << 30) | 123456) << 33) | 987654 expected", "'little') + b'hello!' + struct.pack('<d', 3.14) self.assertEqual(f.to_bytes(), expected) def test_to_bytes_full(self): f = self.FullBitfield(foo=True,", "f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' + struct.pack('<d',", "<< 33) | 987654 expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected)", "0, 1.0, 1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)), (( 0, 255,", "1.0, 1.0)), ] def test_from_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def", "def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f =", "FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [", "assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls):", "licht.utils import RESERVED, Bitfield, Field, FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for", "key, val in field_dict.items(): self.assertEqual(field[key], val) @classmethod def setUpClass(cls): class SimpleBitfield(Bitfield): fields =", "self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03')", "[ Field('foo', 1, FieldType.bool), Field('bar', 30, 
FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float),", "'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data", "9999) << 33) | 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125) f", "+ struct.pack('<f', 1.55) self.assertEqual(f.to_bytes(), expected) def test_from_bytes_simple(self): value = (-1234).to_bytes(2, 'little', signed=True) +", "= self.FullBitfield.from_bytes(value) expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f,", "] cls.SimpleBitfield = SimpleBitfield cls.FullBitfield = FullBitfield cls.ReservedSimpleBitfield = ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield", "16, FieldType.bytes), Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [", "1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)), (( 0, 255, 0), (120,", "| 9999) << 33) | 123123 value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125)", "bar=b'aa') self.assertEqual(f.to_bytes(), b'\\x00\\x00qq\\x00aa') data = b'zzqqzaa' f = self.ReservedSimpleBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': b'qq', 'bar':", "FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields", "(((1 << 30) | 9999) << 33) | 123123 value = val1.to_bytes(8, 'little')", "Field(RESERVED, 8), Field('bar', 16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4),", "True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125} self.assertFieldsEqual(f, expected) def test_reserved_simple(self): f =", "hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb) if __name__ ==", "12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ] cls.SimpleBitfield = 
SimpleBitfield cls.FullBitfield =", "Field('bar', 30, FieldType.uint), Field('baz', 33, FieldType.uint), Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields", "= self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors = [ ((255,", "* 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class FullBitfield(Bitfield): fields = [ Field('foo',", "class FullBitfield(Bitfield): fields = [ Field('foo', 1, FieldType.bool), Field('bar', 30, FieldType.uint), Field('baz', 33,", "ReservedSimpleBitfield cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected =", "test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' +", "b'aa'}) def test_reserved_full(self): f = self.ReservedFullBitfield(foo=3456, bar=3) self.assertEqual(f.to_bytes(), b'\\x80\\x0d\\x03') data = b'\\x80\\x9d\\xab' f", "cls.ReservedFullBitfield = ReservedFullBitfield def test_to_bytes_simple(self): f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2,", "Field('fiz', 32, FieldType.float), ] class ReservedSimpleBitfield(Bitfield): fields = [ Field(RESERVED, 16), Field('foo', 16,", "16, FieldType.bytes), ] class ReservedFullBitfield(Bitfield): fields = [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint),", "FieldType class BitFieldTest(unittest.TestCase): def assertFieldsEqual(self, field, field_dict): for key, val in field_dict.items(): self.assertEqual(field[key],", "test_from_bytes_full(self): val1 = (((1 << 30) | 9999) << 33) | 123123 value", "60, 1.0, 1.0)), (( 0, 255, 0), (120, 1.0, 1.0)), (( 0, 255,", "SimpleBitfield(Bitfield): fields = [ Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz',", "-1234, 'bar': b'foobar', 'baz': 5.25} 
self.assertFieldsEqual(f, expected) def test_from_bytes_full(self): val1 = (((1 <<", "= self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14) expected = (1234).to_bytes(2, 'little') + b'hello!' + struct.pack('<d', 3.14)", "0), ( 0, 1.0, 1.0)), ((255, 255, 0), ( 60, 1.0, 1.0)), ((", "'little', signed=True) + b'foobar' + struct.pack('<d', 5.25) f = self.SimpleBitfield.from_bytes(value) expected = {'foo':", "f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55) expected = (((1 << 30) | 123456)", "test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb, rgb) if __name__ == '__main__': unittest.main()", "= [ Field(RESERVED, 4), Field('foo', 12, FieldType.uint), Field(RESERVED, 5), Field('bar', 3, FieldType.uint), ]", "hsb in self.test_colors: self.assertEqual(LightColor.from_rgb(rgb), hsb) def test_to_rgb(self): for rgb, hsb in self.test_colors: self.assertEqual(LightColor(*hsb).rgb,", "Field('foo', 16, FieldType.int), Field('bar', 6 * 8, FieldType.bytes), Field('baz', 64, FieldType.float), ] class", "self.SimpleBitfield.from_bytes(value) expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25} self.assertFieldsEqual(f, expected) def test_from_bytes_full(self):", "class ColorsTest(unittest.TestCase): test_colors = [ ((255, 0, 0), ( 0, 1.0, 1.0)), ((255,", "b'\\x80\\x9d\\xab' f = self.ReservedFullBitfield.from_bytes(data) self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3}) class ColorsTest(unittest.TestCase): test_colors =", "(( 0, 0, 255), (240, 1.0, 1.0)), ((255, 0, 255), (300, 1.0, 1.0))," ]
[ "Config Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from", "CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown() if __name__ == '__main__':", "def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown() if __name__ == '__main__': run()", "Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App class", "cefpython from kivy.app import App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run():", "from kivy.app import App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run()", "import CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/')", "'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App): def", "Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app", "'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App", "from kivy.config import Config Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import", "import Config Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython", "import App class CefBrowserApp(App): def 
build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown() if", "class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown() if __name__ ==", "kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App): def build(self): return", "'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import", "kivy.config import Config Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser,", "from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App): def build(self):", "CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def", "<reponame>ebs-universe/cefkivy<gh_stars>0 from kivy.config import Config Config.set('kivy', 'log_level', 'debug') Config.set('kivy', 'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser", "App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown() if __name__", "kivy.app import App class CefBrowserApp(App): def build(self): return CefBrowser(start_url='https://india.gov.in/') def run(): CefBrowserApp().run() cefpython.Shutdown()", "'keyboard_mode', 'systemandmulti') from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython from kivy.app import App class CefBrowserApp(App):" ]
[ "custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self, tw_user): update_fields =", "self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view'", "self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs: tasks.twitter_scraper.delay(kwargs['instance'].id)", "ordering = ('query', ) PENDING = 'PD' DONE = 'DN' STATUS = (", "update_fields = [] if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description", "def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def", "class Task(models.Model): class Meta: ordering = ('query', ) PENDING = 'PD' DONE =", "update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query', ) PENDING = 'PD'", "models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager()", "return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta:", "is not self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False)", "= TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\" % (self.query, self.get_status_display()) def", "objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self,", "__str__ = lambda self: self.name def update_(self, tw_user): update_fields = [] if self.name", "custom = TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\" % (self.query, self.get_status_display())", "!= tw_user.description: self.description = 
tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url", "( (PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS,", "class Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description", "models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__", "= models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\" %", "('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image", "return \"%s -> Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status is", "self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query', ) PENDING = 'PD' DONE", "self.status = self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view' in", "TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self, tw_user): update_fields = [] if", "if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query', ) PENDING =", "__str__(self): return \"%s -> Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status", "def update_(self, tw_user): update_fields = [] if self.name != tw_user.name: self.name = tw_user.name", "'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager()", "query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def 
search(self, query): return self.filter(query__icontains=query) def pending(self): return", "tw_user.description: self.description = tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image')", "TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return", "class Meta: ordering = ('query', ) PENDING = 'PD' DONE = 'DN' STATUS", "default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status:", "\"%s -> Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status is not", "tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering", "default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def", "self.description = tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if", "'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image =", "tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = models.URLField(blank=True,", "### Define Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id =", "models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self, tw_user): update_fields", "self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity 
!= tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if", "django.db.models.signals import post_save from . import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def", "Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query):", "% (self.query, self.get_status_display()) def update_to_done(self): if self.status is not self.DONE: self.status = self.DONE", "if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity", "[] if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description:", "tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query', )", "if self.status is not self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs): if", "done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity',", "self.get_status_display()) def update_to_done(self): if self.status is not self.DONE: self.status = self.DONE self.save() @staticmethod", "lambda self: self.name def update_(self, tw_user): update_fields = [] if self.name != tw_user.name:", "if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description", "(self.query, self.get_status_display()) def update_to_done(self): if self.status is not self.DONE: self.status = self.DONE self.save()", "update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if 
update_fields: self.save(update_fields=update_fields) class", "return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models", "Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self,", "update_to_done(self): if self.status is not self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs):", "def update_to_done(self): if self.status is not self.DONE: self.status = self.DONE self.save() @staticmethod def", "= models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects =", "django.db import models from django.db.models.signals import post_save from . import tasks ### Define", "status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self):", "choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s ->", "'DN' STATUS = ( (PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status", "= models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return", "'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects", "= models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager()", "Task(models.Model): class Meta: ordering = ('query', ) PENDING = 'PD' DONE = 'DN'", "Meta: ordering = ('query', ) PENDING = 'PD' DONE = 'DN' STATUS =", "DONE = 'DN' STATUS = ( (PENDING, 
'Pending'), (DONE, 'Done') ) query =", "= models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True)", "query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom =", ") PENDING = 'PD' DONE = 'DN' STATUS = ( (PENDING, 'Pending'), (DONE,", "ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True,", "= ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True)", "popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda", "update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url:", "= ( (PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2,", "import models from django.db.models.signals import post_save from . import tasks ### Define Querysets", "from . 
import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return", "null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ =", "tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity')", "class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name =", "self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models class", "tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity !=", "post_save from . 
import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query):", "tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description update_fields.append('description') if self.image !=", "Define Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True)", "'PD' DONE = 'DN' STATUS = ( (PENDING, 'Pending'), (DONE, 'Done') ) query", "self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta: ordering", "import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class", "%s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status is not self.DONE: self.status =", "return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD')", "= TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self, tw_user): update_fields = []", "class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return", "if self.description != tw_user.description: self.description = tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image", "name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity =", "= ('query', ) PENDING = 'PD' DONE = 'DN' STATUS = ( (PENDING,", "= 'DN' STATUS = ( (PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100)", "def __str__(self): return \"%s -> Status: %s\" 
% (self.query, self.get_status_display()) def update_to_done(self): if", "!= tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count", "return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name')", "def run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs: tasks.twitter_scraper.delay(kwargs['instance'].id) # Signals post_save.connect(Task.run,", "(DONE, 'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects =", "self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def", "tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet):", "@staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs: tasks.twitter_scraper.delay(kwargs['instance'].id) # Signals", "null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom", "image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom =", "= models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager()", "('query', ) PENDING = 'PD' DONE = 'DN' STATUS = ( (PENDING, 'Pending'),", "Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description =", "models.CharField(max_length=100) status = 
models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def", "search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def pending(self):", "update_(self, tw_user): update_fields = [] if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name')", "self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id", "update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query', ) PENDING", "query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define", "TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200)", "= self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs:", "tw_user): update_fields = [] if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if", ". 
import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query)", "pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class", "TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query)", "= models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self:", "self.description != tw_user.description: self.description = tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image =", "Models class TwitterProfile(models.Model): class Meta: ordering = ('popularity', 'name') tw_id = models.PositiveIntegerField(unique=True) name", "PENDING = 'PD' DONE = 'DN' STATUS = ( (PENDING, 'Pending'), (DONE, 'Done')", "self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs: tasks.twitter_scraper.delay(kwargs['instance'].id) #", "self: self.name def update_(self, tw_user): update_fields = [] if self.name != tw_user.name: self.name", "tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields)", "Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status is not self.DONE: self.status", "def done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model): class Meta: ordering =", "= tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description 
update_fields.append('description') if self.image", "class TaskQuerySet(models.QuerySet): def search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self):", "not self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created', False) or", "self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class", "= models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True,", "models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0)", "= models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name def update_(self, tw_user):", "def search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN')", "= 'PD' DONE = 'DN' STATUS = ( (PENDING, 'Pending'), (DONE, 'Done') )", "models.PositiveIntegerField(blank=True, default=0) objects = models.Manager() custom = TwitterProfileQuerySet.as_manager() __str__ = lambda self: self.name", "self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description update_fields.append('description') if", "tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description update_fields.append('description')", "self.name def update_(self, tw_user): update_fields = [] if self.name != tw_user.name: self.name =", "self.popularity = tw_user.followers_count 
update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering =", "import post_save from . import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self,", "!= tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description = tw_user.description", "search(self, query): return self.filter(query__icontains=query) def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ###", "def pending(self): return self.filter(status='PD') def done(self): return self.filter(status='DN') ### Define Models class TwitterProfile(models.Model):", "= [] if self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description !=", "self.name != tw_user.name: self.name = tw_user.name update_fields.append('name') if self.description != tw_user.description: self.description =", "from django.db.models.signals import post_save from . import tasks ### Define Querysets class TwitterProfileQuerySet(models.QuerySet):", "from django.db import models from django.db.models.signals import post_save from . import tasks ###", "if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model):", "run(**kwargs): if kwargs.get('created', False) or 'from_view' in kwargs: tasks.twitter_scraper.delay(kwargs['instance'].id) # Signals post_save.connect(Task.run, Task)", "models from django.db.models.signals import post_save from . 
import tasks ### Define Querysets class", "= lambda self: self.name def update_(self, tw_user): update_fields = [] if self.name !=", "update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count:", "!= tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta:", "STATUS = ( (PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status =", "= tw_user.followers_count update_fields.append('popularity') if update_fields: self.save(update_fields=update_fields) class Task(models.Model): class Meta: ordering = ('query',", "-> Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self): if self.status is not self.DONE:", ") query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom", "models.CharField(max_length=2, choices=STATUS, default=PENDING) objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s", "objects = models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\"", "TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\" % (self.query, self.get_status_display()) def update_to_done(self):", "(PENDING, 'Pending'), (DONE, 'Done') ) query = models.CharField(max_length=100) status = models.CharField(max_length=2, choices=STATUS, default=PENDING)", "= tw_user.description update_fields.append('description') if self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity", "models.PositiveIntegerField(unique=True) name = models.CharField(max_length=200) description = models.TextField(blank=True, null=True) image = 
models.URLField(blank=True, null=True) popularity", "description = models.TextField(blank=True, null=True) image = models.URLField(blank=True, null=True) popularity = models.PositiveIntegerField(blank=True, default=0) objects", "= tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity = tw_user.followers_count update_fields.append('popularity') if update_fields:", "models.Manager() custom = TaskQuerySet.as_manager() def __str__(self): return \"%s -> Status: %s\" % (self.query,", "self.status is not self.DONE: self.status = self.DONE self.save() @staticmethod def run(**kwargs): if kwargs.get('created',", "### Define Querysets class TwitterProfileQuerySet(models.QuerySet): def search(self, query): return self.filter(name__icontains=query) class TaskQuerySet(models.QuerySet): def", "self.image != tw_user.profile_image_url: self.image = tw_user.profile_image_url update_fields.append('image') if self.popularity != tw_user.followers_count: self.popularity =" ]
[ ": break try: inum=int(num) except: print(\"Invalid Number\") if inum > largest: largest=inum if", "True: num = input(\"Enter a number: \") if num == \"done\" : break", "smallest is None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum is\", largest) print(\"Minimum is\", smallest)", "largest = -1 smallest = None while True: num = input(\"Enter a number:", "inum=int(num) except: print(\"Invalid Number\") if inum > largest: largest=inum if smallest is None:", "\") if num == \"done\" : break try: inum=int(num) except: print(\"Invalid Number\") if", "if num == \"done\" : break try: inum=int(num) except: print(\"Invalid Number\") if inum", "except: print(\"Invalid Number\") if inum > largest: largest=inum if smallest is None: smallest=inum", "= None while True: num = input(\"Enter a number: \") if num ==", "smallest = None while True: num = input(\"Enter a number: \") if num", "= input(\"Enter a number: \") if num == \"done\" : break try: inum=int(num)", "inum > largest: largest=inum if smallest is None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum", "if inum > largest: largest=inum if smallest is None: smallest=inum elif inum<smallest: smallest=inum", "-1 smallest = None while True: num = input(\"Enter a number: \") if", "print(\"Invalid Number\") if inum > largest: largest=inum if smallest is None: smallest=inum elif", "try: inum=int(num) except: print(\"Invalid Number\") if inum > largest: largest=inum if smallest is", "None while True: num = input(\"Enter a number: \") if num == \"done\"", "if smallest is None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum is\", largest) print(\"Minimum is\",", "\"done\" : break try: inum=int(num) except: print(\"Invalid Number\") if inum > largest: largest=inum", "number: \") if num == \"done\" : break try: inum=int(num) except: print(\"Invalid Number\")", "num == \"done\" : break try: inum=int(num) except: print(\"Invalid Number\") if inum >", "largest=inum if smallest is 
None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum is\", largest) print(\"Minimum", "num = input(\"Enter a number: \") if num == \"done\" : break try:", "= -1 smallest = None while True: num = input(\"Enter a number: \")", "== \"done\" : break try: inum=int(num) except: print(\"Invalid Number\") if inum > largest:", "> largest: largest=inum if smallest is None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum is\",", "while True: num = input(\"Enter a number: \") if num == \"done\" :", "a number: \") if num == \"done\" : break try: inum=int(num) except: print(\"Invalid", "input(\"Enter a number: \") if num == \"done\" : break try: inum=int(num) except:", "largest: largest=inum if smallest is None: smallest=inum elif inum<smallest: smallest=inum print(\"Maximum is\", largest)", "Number\") if inum > largest: largest=inum if smallest is None: smallest=inum elif inum<smallest:", "break try: inum=int(num) except: print(\"Invalid Number\") if inum > largest: largest=inum if smallest" ]
[ "cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input =", "timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import", "from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag =", "cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag #", "GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect =", "globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection", "import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source", "cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue =", "= GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect)", "of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() 
print(\"Final", "CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\")", "cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag =\", process.GlobalTag.globaltag)", "cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype =", "= cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval =", "interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of", "= cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) ) from", "= cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the", "as cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input", "= cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue", "the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag =", "globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\",", "= cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of globalTags", 
"# Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect())", "process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3),", "process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag", ") from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag", "import print_function import FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\")", "process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string", "import GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect", "__future__ import print_function import FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process =", "import FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents =", "list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") 
process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt()", "process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag =\", process.GlobalTag.globaltag) process.path", "Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\") globalTag = GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag", "process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\",", "from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100)", "print_function import FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents", "cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag", ") process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1),", "cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list", "Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )", "= cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag =\",", "firstValue = cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare", "GlobalTag(autoCond['run2_data'],\"frontier://FrontierProd/CMS_CONDITIONS\") 
process.GlobalTag.connect = cms.string(globalTag.connect()) process.GlobalTag.globaltag = globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final", "from __future__ import print_function import FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process", "lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) )", "process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval", "= cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue", "= cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag", "autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source =", "FWCore.ParameterSet.Config as cms from Configuration.AlCa.autoCond import autoCond process = cms.Process(\"TEST\") process.maxEvents = cms.untracked.PSet(", "= cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype", "globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag =\", process.GlobalTag.globaltag) process.path = cms.Path()", "= globalTag.gt() print(\"Final connection string =\", process.GlobalTag.connect) print(\"Final globalTag =\", process.GlobalTag.globaltag) process.path =", "cms.uint64(1) ) from CondCore.ESSources.GlobalTag import GlobalTag # Prepare the list of globalTags process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")", "input = cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'),", 
"cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue = cms.uint64(1), interval = cms.uint64(1)", "cms.untracked.int32(100) ) process.source = cms.Source(\"EmptyIOVSource\", lastValue = cms.uint64(3), timetype = cms.string('runnumber'), firstValue =" ]
[ "eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return", "src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility", "inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(),", "truth_path = [os.path.join(ultimate_path, x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path,", "concrete_a def create_concrete_b(): method = create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method],", "in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\")", "return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age,", "\"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for file_path", "Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell],", "subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path =", "x in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def", "methods=[eat], 
fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(),", "visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc", "= Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep],", "= os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\",", "\"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep", "import write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def", "return weapon def create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute", "Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override)", "from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from", "assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = 
os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path,", "= Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours],", "methods=[method]) return interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\",", "src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir):", "methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def create_attribute(): method = Method(\"method\") field", "fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(),", "src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute =", "create_concrete_b(): method = create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return", "generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path", "visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc])", "interface def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface", "return concrete_a def create_concrete_b(): method = create_do_algorithm() 
strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\",", "\"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x", "visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute =", "<filename>test/test_dataToCode/test_to_python/test_system/test_system.py import pytest import os import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute", "os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for file_path in all_files_path: if", "= ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def create_attribute(): method =", "create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\",", "\"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path =", "interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public)", "create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path =", "x in files_path] truth_path = [os.path.join(ultimate_path, x) for x in files_path] for truth_file_path,", "attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], 
visibility=Visibility.private, modifier=Modifier.override) orc =", "x in files_path] truth_path = [os.path.join(strategy_path, x) for x in files_path] for truth_file_path,", "strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context =", "orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public)", "setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute])", "fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat =", "os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path", "fields=[attribute]) return context def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a =", "from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from", "from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def", "= ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(),", "def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface", 
"for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x", "in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path =", "Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public)", "visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack =", "= create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(),", "= Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\",", "generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(ultimate_path, x) for", "implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm() strategy = create_strategy() concrete_b =", "\"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x in", "interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private)", "create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon():", "Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = 
Interface(\"IAttack\", methods=[method]) return interface", "test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path", "methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\")", "= Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc =", "file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(ultimate_path,", "create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\",", "= Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method", "parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age],", "\"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk", "orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc():", "generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(strategy_path, x) for", "attribute]) return weapon def create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public)", "= Method(\"eat\", 
parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc", "import os import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import", "[os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(ultimate_path, x) for x in", "truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\"))", "Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc", "= [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x)", "= os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"]", "= [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(ultimate_path, x) for x", "= create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method =", "in files_path] truth_path = [os.path.join(strategy_path, x) for x in files_path] for truth_file_path, generated_file_path", "concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\",", "[] for file_path in all_files_path: if file_path.endswith(\".py\"): 
files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for", "= Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc()", "create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path =", "create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method],", "truth_path = [os.path.join(strategy_path, x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path,", "visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat],", "= Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage],", "= Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc", "implementations=[attack_interface, walk]) return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours =", "= create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food =", "\"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\",", "age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours", "create_attribute(), 
create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__,", "= [] for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x)", "damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\",", "create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage", "filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface", "return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\",", "\"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute =", "= Interface(\"ISpell\", methods=[method]) return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface =", "getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute,", "import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import", "for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell():", "Interface(\"IWalk\", methods=[method]) return interface def 
create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method =", "files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(ultimate_path, x)", "methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public)", "create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private)", "\"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack()", "= Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute],", "attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc", "create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\"))", "Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name = Attribute(\"name\",", "= [os.path.join(strategy_path, x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path):", "x) for x in files_path] truth_path = [os.path.join(ultimate_path, x) for x in files_path]", "setAttribute], fields=[name, age, attribute]) return weapon def create_attribute(): method = Method(\"method\") field =", "zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def 
test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\"))", "def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage", "\"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack", "import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\",", "return interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\",", "attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute])", "parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc():", "Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc =", "ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\",", "Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon", "method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def create_food(): method =", "fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", 
visibility=Visibility.public)", "Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return", "Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage])", "sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface,", "create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat =", "method = create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a", "= os.listdir(ultimate_path) files_path = [] for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path", "in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in files_path]", "generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\",", "def create_concrete_b(): method = create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy])", "def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface def", "Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", 
\"str\") method = Method(\"doAlgorithm\", parameters=[attribute])", "create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(),", "\"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method =", "visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk =", "return interface def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage])", "Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override)", "\"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def", "concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()]", "= create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours],", "test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def", "Interface(\"ISpell\", methods=[method]) return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\",", "= Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method =", "in files_path] for truth_file_path, 
generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir):", "generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface =", "= create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc():", "tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for", "Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm()", "interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public)", "\"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(strategy_path, x)", "write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm():", "sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat", "from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method", "visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method = Method(\"walk\")", "methods=[method], fields=[field]) return attribute def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method])", "= ClassData(\"Context\", methods=[method], fields=[attribute]) 
return context def create_concrete_a(): method = create_do_algorithm() strategy =", "import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\",", "= Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name =", "all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path", "Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\")", "os.listdir(ultimate_path) files_path = [] for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path =", "\"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat],", "os import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData", "def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface =", "create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path", "f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\"))", "= Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) 
return", "parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc =", "spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\",", "return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context", "= Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method = Method(\"attack\",", "Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name = Attribute(\"name\",", "= [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir,", "import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import", "create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return", "Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name,", "orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food", "create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path", 
"[create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"]", "= Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return", "visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def", "from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from", "Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def", "visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep =", "obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(),", "method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name", "attribute def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def", "sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\",", "src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files", "[os.path.join(strategy_path, x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path, 
generated_path): assert", "Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute", "modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\",", "ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack", "= ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\",", "subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface", "return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path =", "def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy def create_context():", "damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method])", "files_path] truth_path = [os.path.join(strategy_path, x) for x in files_path] for truth_file_path, generated_file_path in", "walk = create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\",", "filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method])", "write_files(objects, tmpdir, \"python\") ultimate_path = 
os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = []", "def create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\",", "create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b():", "food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc", "= Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk])", "parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name = Attribute(\"name\", \"str\",", "parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc", "return context def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\",", "def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def", "def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\",", "[\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for", "from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import 
Method from", "methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\")", "import pytest import os import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from", "visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\",", "inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\",", "= [os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(strategy_path, x) for x", "import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import", "= os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path =", "from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute", "Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method", "return method def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy", "interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public)", "Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier 
import Modifier", "return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return", "visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return", "methods=[method]) return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method])", "create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path =", "Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc() fat_orc = ClassData(\"FatOrc\",", "= Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return", "age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\",", "method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a(): method", "return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name = Attribute(\"name\", \"str\",", "objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\",", "Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected)", "method = Method(\"method\") field = 
Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field])", "for x in files_path] truth_path = [os.path.join(ultimate_path, x) for x in files_path] for", "test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method", "inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\",", "def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy])", "in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir):", "= Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage = Attribute(\"damage\",", "method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage =", "strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path", "assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\",", "parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return", "damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk()", "= Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = 
Method(\"getAttribute\", return_type=\"Attribute\")", "Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method", "concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm() strategy", "= Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return", "= Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age", "= create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc =", "for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path =", "Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age =", "= Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\",", "create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path =", "Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age =", "sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack,", "return high_orc def create_fat_orc(): food = 
Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food])", "= create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(),", "strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method", "= Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep],", "walk]) return orc def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\",", "[os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(strategy_path, x) for x in", "fields=[name, age, attribute]) return weapon def create_attribute(): method = Method(\"method\") field = Attribute(\"field\",", "ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def create_attribute(): method = Method(\"method\")", "create_orc() fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\",", "= Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface", "= Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk():", "files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path", "import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from 
src.dataToCode.dataClasses.modifier import", "files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir,", "obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()]", "\"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def", "= Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc])", "objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects,", "create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute =", "strategy = Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public)", "\"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep", "\"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for file_path in all_files_path: if file_path.endswith(\".py\"):", "return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(),", "truth_file_path, generated_file_path in zip(truth_path, generated_path): assert 
filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method", "all_files_path = os.listdir(ultimate_path) files_path = [] for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path)", "= create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def", "create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute", "return interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\",", "methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage = Attribute(\"damage\",", "Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public)", "def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc =", "visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep =", "weapon def create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute =", "\"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours", "files_path] truth_path = [os.path.join(ultimate_path, x) for x in files_path] for truth_file_path, generated_file_path in", "parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def 
create_attribute():", "= Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\",", "\"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon =", "= Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a(): method =", "x) for x in files_path] truth_path = [os.path.join(strategy_path, x) for x in files_path]", "os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\",", "filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\",", "interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private)", "ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage =", "= [os.path.join(ultimate_path, x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path):", "context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a(): method = create_do_algorithm() strategy", "= Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def 
create_food(): method = Method(\"getNutrients\",", "create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path", "\"ISpell\", visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private, modifier=Modifier.override)", "= create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\",", "def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack():", "fat_orc = ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\",", "create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy():", "interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface", "method def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return strategy def", "methods=[method]) return interface def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\",", "for x in files_path] truth_path = [os.path.join(strategy_path, x) for x in files_path] for", "method = create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b", "Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context", 
"generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\",", "= Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute])", "create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path)", "create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage =", "attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method", "= Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method", "xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\",", "create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def create_food(): method", "import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import", "= create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects", 
"Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public)", "ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\",", "fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public)", "= os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for file_path in all_files_path:", "Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon", "files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def", "parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc", "\"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc", "create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method],", "def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def create_food():", "write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] 
strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\"))", "def create_weapon(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute", "ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(),", "visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\",", "Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface =", "methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm() strategy = create_strategy() concrete_b", "weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def create_attribute(): method", "orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def", "hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method =", "src.dataToCode.dataClasses.modifier import Modifier def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method =", "strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(),", "field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def", "method = Method(\"getNutrients\", return_type=\"str\") 
interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name", "Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return", "method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm() strategy =", "visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours =", "create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__,", "src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method", "in files_path] truth_path = [os.path.join(ultimate_path, x) for x in files_path] for truth_file_path, generated_file_path", "create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm()", "implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path", "file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in", "main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__,", "[os.path.join(ultimate_path, x) for x in files_path] 
for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert", "Interface(\"Strategy\", methods=[method]) return strategy def create_context(): attribute = Attribute(\"strategy\", \"Strategy\", visibility=Visibility.public) method =", "main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"]) files_path =", "create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc = create_orc()", "ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir,", "[create_spell(), create_food(), create_weapon(), create_attribute(), create_attack(), create_walk(), create_orc(), create_high_orc(), create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\")", "ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm() strategy = create_strategy()", "fields=[field]) return attribute def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return", "visibility=Visibility.public) method = Method(\"attack\", parameters=[damage]) interface = Interface(\"IAttack\", methods=[method]) return interface def create_orc():", "create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food", "ClassData from src.dataToCode.dataClasses.interface import Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files", "Method(\"getAttribute\", 
return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name,", "Method(\"method\") field = Attribute(\"field\", \"Type\", visibility=Visibility.public) attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute", "attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute = Method(\"getAttribute\", return_type=\"Attribute\") setAttribute = Method(\"setAttribute\", return_type=\"void\",", "create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects = [create_spell(), create_food(),", "generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return", "visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) walk = create_walk() attack_interface = create_attack() attack_method", "name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\",", "= Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food],", "files_path = [] for file_path in all_files_path: if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir,", "Method(\"doEffect\") interface = Interface(\"ISpell\", methods=[method]) return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\")", "hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell = Attribute(\"spell\", \"ISpell\", visibility=Visibility.public) attack = Method(\"attack\",", "Attribute(\"spell\", \"ISpell\", 
visibility=Visibility.public) attack = Method(\"attack\", parameters=[damage], modifier=Modifier.override) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private,", "create_fat_orc(), create_obese_orc()] write_files(objects, tmpdir, \"python\") ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path", "age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\",", "return attribute def create_walk(): method = Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface", "= Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\",", "food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\",", "ultimate_path = os.path.abspath(os.path.join(__file__, \"../ultimate_example\")) all_files_path = os.listdir(ultimate_path) files_path = [] for file_path in", "fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects =", "\"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path =", "Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a(): method = create_do_algorithm()", "modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc", "method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method]) return 
strategy def create_context(): attribute =", "= Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\",", "\"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = [os.path.join(tmpdir, x) for x in files_path]", "f\"--code_path={tmpdir}\", \"--language=python\"]) files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path", "methods=[method], fields=[attribute]) return context def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a", "parameters=[hours], visibility=Visibility.private, modifier=Modifier.override) orc = create_orc() high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc])", "= ClassData(\"FatOrc\", methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public)", "def create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell", "ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy()", "for x in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path)", "def test_strategy_example(tmpdir): def create_do_algorithm(): attribute = Attribute(\"data\", \"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return", "high_orc def create_fat_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food]) orc", "= 
ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects = [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects,", "return_type=\"void\", parameters=[attribute]) weapon = ClassData(\"Weapon\", methods=[getAttribute, setAttribute], fields=[name, age, attribute]) return weapon def", "\"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) attribute = Attribute(\"attribute\", \"Attribute\", visibility=Visibility.protected) getAttribute", "zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method = Method(\"doEffect\") interface", "tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\", \"concrete_strategy_b.py\"] strategy_path = os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path", "import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData import ClassData from", "name = Attribute(\"name\", \"str\", visibility=Visibility.public) age = Attribute(\"age\", \"int\", visibility=Visibility.private) damage = Attribute(\"damage\",", "= ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method = Method(\"walk\") interface =", "heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc()", "x) for x in files_path] for truth_file_path, generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path,", "= Method(\"getNutrients\", return_type=\"str\") interface = Interface(\"IFood\", methods=[method]) return interface def create_weapon(): name =", "def create_obese_orc(): food = Attribute(\"food\", \"IFood\", 
visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat", "visibility=Visibility.public) method = Method(\"doSomeBusinessLogic\") context = ClassData(\"Context\", methods=[method], fields=[attribute]) return context def create_concrete_a():", "eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], fields=[heart_attack],", "return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\",", "create_do_algorithm() strategy = create_strategy() concrete_b = ClassData(\"ConcreteStrategyB\", methods=[method], implementations=[strategy]) return concrete_b objects =", "if file_path.endswith(\".py\"): files_path.append(file_path) generated_path = [os.path.join(tmpdir, x) for x in files_path] truth_path =", "age, attribute]) return weapon def create_attribute(): method = Method(\"method\") field = Attribute(\"field\", \"Type\",", "Interface from src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility", "Method(\"walk\") interface = Interface(\"IWalk\", methods=[method]) return interface def create_attack(): damage = Attribute(\"damage\", \"int\",", "attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\", methods=[attack_method,", "\"str\") method = Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm() strategy", "context def create_concrete_a(): method = create_do_algorithm() strategy = create_strategy() concrete_a = ClassData(\"ConcreteStrategyA\", methods=[method],", "os.path.abspath(os.path.join(__file__, \"../strategy_example\")) generated_path = 
[os.path.join(tmpdir, x) for x in files_path] truth_path = [os.path.join(strategy_path,", "Method(\"doAlgorithm\", parameters=[attribute]) return method def create_strategy(): method = create_do_algorithm() strategy = Interface(\"Strategy\", methods=[method])", "= Interface(\"IAttack\", methods=[method]) return interface def create_orc(): name = Attribute(\"name\", \"str\", visibility=Visibility.public) age", "create_attack() attack_method = Method(\"attack\", parameters=[damage]) sleep = Method(\"sleep\", parameters=[hours], visibility=Visibility.private) orc = ClassData(\"Orc\",", "def test_strategy_xml(tmpdir): main_path = os.path.abspath(os.path.join(__file__,\"../../../../../main.py\")) xml_path = os.path.abspath(os.path.join(__file__,\"../../../../strategy.xml\")) subprocess.run([\"python3\", main_path, f\"--xml_file={xml_path}\", f\"--code_path={tmpdir}\", \"--language=python\"])", "high_orc = ClassData(\"HighOrc\", methods=[attack, sleep], fields=[spell], inheritances=[orc]) return high_orc def create_fat_orc(): food =", "create_high_orc(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) hours = Attribute(\"hours\", \"int\", visibility=Visibility.public) spell =", "interface = Interface(\"ISpell\", methods=[method]) return interface def create_food(): method = Method(\"getNutrients\", return_type=\"str\") interface", "methods=[method]) return interface def create_attack(): damage = Attribute(\"damage\", \"int\", visibility=Visibility.public) method = Method(\"attack\",", "attribute = ClassData(\"Attribute\", methods=[method], fields=[field]) return attribute def create_walk(): method = Method(\"walk\") interface", "visibility=Visibility.public) heart_attack = Attribute(\"heartAttackChance\", \"int\", visibility=Visibility.public) eat = Method(\"eat\", parameters=[food], modifier=Modifier.override) fat_orc =", "modifier=Modifier.override) fat_orc = create_fat_orc() obese_orc = ClassData(\"ObeseOrc\", methods=[eat], 
fields=[heart_attack], inheritances=[fat_orc]) return obese_orc objects", "pytest import os import filecmp import subprocess from src.dataToCode.dataClasses.attribute import Attribute from src.dataToCode.dataClasses.classData", "generated_file_path in zip(truth_path, generated_path): assert filecmp.cmp(truth_file_path, generated_file_path) def test_ultimate_example(tmpdir): def create_spell(): method =", "methods=[eat], inheritances=[orc]) return fat_orc def create_obese_orc(): food = Attribute(\"food\", \"IFood\", visibility=Visibility.public) heart_attack =", "= ClassData(\"Orc\", methods=[attack_method, sleep], fields=[name, age], implementations=[attack_interface, walk]) return orc def create_high_orc(): damage", "= [create_strategy(), create_context(), create_concrete_a(), create_concrete_b()] write_files(objects, tmpdir, \"python\") files_path = [\"strategy.py\", \"context.py\", \"concrete_strategy_a.py\",", "= ClassData(\"ConcreteStrategyA\", methods=[method], implementations=[strategy]) return concrete_a def create_concrete_b(): method = create_do_algorithm() strategy =", "src.dataToCode.dataClasses.method import Method from src.dataToCode.write_files import write_files from src.dataToCode.dataClasses.visibility import Visibility from src.dataToCode.dataClasses.modifier" ]
[]
[ "<gh_stars>0 from healpy import nside2npix, pix2ang class Sky(): \"\"\" Generates an array of", "sky positions. Output should be an array. \"\"\" def __init__(self, NSIDE, strength): #", "on the sky w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) # want", "NSIDE, strength): # depending on NSIDE, there will be anywhere # from 12", "__init__(self, NSIDE, strength): # depending on NSIDE, there will be anywhere # from", "import nside2npix, pix2ang class Sky(): \"\"\" Generates an array of GRB's given certains", "convert these pixels into theta phi coords. self.sourceangs = [] for i in", "self.Ao = strength self.pixels = nside2npix(NSIDE) # want to convert these pixels into", "Generates an array of GRB's given certains strength at different sky positions. Output", "pixels into theta phi coords. self.sourceangs = [] for i in range(self.pixels): self.sourceangs.append(pix2ang(NSIDE,", "nside2npix, pix2ang class Sky(): \"\"\" Generates an array of GRB's given certains strength", "array. \"\"\" def __init__(self, NSIDE, strength): # depending on NSIDE, there will be", "GRB's given certains strength at different sky positions. Output should be an array.", "positions. Output should be an array. \"\"\" def __init__(self, NSIDE, strength): # depending", "\"\"\" def __init__(self, NSIDE, strength): # depending on NSIDE, there will be anywhere", "# from 12 to infinite spots on the sky w/ GRBs self.Ao =", "want to convert these pixels into theta phi coords. self.sourceangs = [] for", "anywhere # from 12 to infinite spots on the sky w/ GRBs self.Ao", "self.pixels = nside2npix(NSIDE) # want to convert these pixels into theta phi coords.", "# want to convert these pixels into theta phi coords. self.sourceangs = []", "should be an array. \"\"\" def __init__(self, NSIDE, strength): # depending on NSIDE,", "into theta phi coords. 
self.sourceangs = [] for i in range(self.pixels): self.sourceangs.append(pix2ang(NSIDE, i))", "on NSIDE, there will be anywhere # from 12 to infinite spots on", "from 12 to infinite spots on the sky w/ GRBs self.Ao = strength", "Output should be an array. \"\"\" def __init__(self, NSIDE, strength): # depending on", "different sky positions. Output should be an array. \"\"\" def __init__(self, NSIDE, strength):", "of GRB's given certains strength at different sky positions. Output should be an", "GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) # want to convert these pixels", "12 to infinite spots on the sky w/ GRBs self.Ao = strength self.pixels", "depending on NSIDE, there will be anywhere # from 12 to infinite spots", "nside2npix(NSIDE) # want to convert these pixels into theta phi coords. self.sourceangs =", "w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) # want to convert these", "from healpy import nside2npix, pix2ang class Sky(): \"\"\" Generates an array of GRB's", "strength): # depending on NSIDE, there will be anywhere # from 12 to", "these pixels into theta phi coords. self.sourceangs = [] for i in range(self.pixels):", "spots on the sky w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) #", "= strength self.pixels = nside2npix(NSIDE) # want to convert these pixels into theta", "NSIDE, there will be anywhere # from 12 to infinite spots on the", "the sky w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) # want to", "Sky(): \"\"\" Generates an array of GRB's given certains strength at different sky", "certains strength at different sky positions. Output should be an array. \"\"\" def", "an array of GRB's given certains strength at different sky positions. 
Output should", "sky w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE) # want to convert", "# depending on NSIDE, there will be anywhere # from 12 to infinite", "there will be anywhere # from 12 to infinite spots on the sky", "array of GRB's given certains strength at different sky positions. Output should be", "to infinite spots on the sky w/ GRBs self.Ao = strength self.pixels =", "infinite spots on the sky w/ GRBs self.Ao = strength self.pixels = nside2npix(NSIDE)", "strength self.pixels = nside2npix(NSIDE) # want to convert these pixels into theta phi", "= nside2npix(NSIDE) # want to convert these pixels into theta phi coords. self.sourceangs", "to convert these pixels into theta phi coords. self.sourceangs = [] for i", "class Sky(): \"\"\" Generates an array of GRB's given certains strength at different", "healpy import nside2npix, pix2ang class Sky(): \"\"\" Generates an array of GRB's given", "at different sky positions. Output should be an array. \"\"\" def __init__(self, NSIDE,", "be anywhere # from 12 to infinite spots on the sky w/ GRBs", "be an array. \"\"\" def __init__(self, NSIDE, strength): # depending on NSIDE, there", "def __init__(self, NSIDE, strength): # depending on NSIDE, there will be anywhere #", "an array. \"\"\" def __init__(self, NSIDE, strength): # depending on NSIDE, there will", "pix2ang class Sky(): \"\"\" Generates an array of GRB's given certains strength at", "strength at different sky positions. Output should be an array. \"\"\" def __init__(self,", "\"\"\" Generates an array of GRB's given certains strength at different sky positions.", "given certains strength at different sky positions. Output should be an array. \"\"\"", "will be anywhere # from 12 to infinite spots on the sky w/" ]
[ "multiline string in Python print('''Dear Alice, Eve's cat has been arrested for catnapping,", "string in Python print('''Dear Alice, Eve's cat has been arrested for catnapping, cat", "in Python print('''Dear Alice, Eve's cat has been arrested for catnapping, cat burglary,", "print('''Dear Alice, Eve's cat has been arrested for catnapping, cat burglary, and extortion.", "create a multiline string in Python print('''Dear Alice, Eve's cat has been arrested", "to create a multiline string in Python print('''Dear Alice, Eve's cat has been", "# Using triple-quote marks to create a multiline string in Python print('''Dear Alice,", "Python print('''Dear Alice, Eve's cat has been arrested for catnapping, cat burglary, and", "triple-quote marks to create a multiline string in Python print('''Dear Alice, Eve's cat", "Using triple-quote marks to create a multiline string in Python print('''Dear Alice, Eve's", "Alice, Eve's cat has been arrested for catnapping, cat burglary, and extortion. Sincerely,", "Eve's cat has been arrested for catnapping, cat burglary, and extortion. Sincerely, Bob''')", "a multiline string in Python print('''Dear Alice, Eve's cat has been arrested for", "marks to create a multiline string in Python print('''Dear Alice, Eve's cat has" ]
[ "query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) |", "SearchSearchView(ListView): model = Staff paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self): query", "StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model = Staff form_class = StaffUpdateForm template_name_suffix", "self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) |", "return Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model = Staff class", "| Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) )", "query = self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) |", ") else: return Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model =", "ListView, UpdateView from .forms import StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model", ".models import Staff class SearchSearchView(ListView): model = Staff paginate_by = 10 queryset =", "10 queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if query: return Staff.objects.filter(", "| Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model", "render from django.views.generic import DetailView, ListView, UpdateView from .forms import StaffUpdateForm from .models", "= Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if 
query: return Staff.objects.filter( Q(supplier_name__icontains=query) |", "= self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query)", "Staff paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if", "| Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model = Staff", "= 10 queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if query: return", "django.shortcuts import render from django.views.generic import DetailView, ListView, UpdateView from .forms import StaffUpdateForm", "import DetailView, ListView, UpdateView from .forms import StaffUpdateForm from .models import Staff class", "UpdateView from .forms import StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model =", "StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model = Staff paginate_by = 10", "Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView):", "class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model", "class SearchSearchView(ListView): model = Staff paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self):", "get_queryset(self): query = self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query)", "return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query)", "from django.views.generic import DetailView, ListView, UpdateView from 
.forms import StaffUpdateForm from .models import", "Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all()", "model = Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model = Staff", "django.views.generic import DetailView, ListView, UpdateView from .forms import StaffUpdateForm from .models import Staff", "Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model = Staff form_class =", "from django.db.models import Q from django.shortcuts import render from django.views.generic import DetailView, ListView,", "Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query)", "django.contrib.messages.views import SuccessMessageMixin from django.db.models import Q from django.shortcuts import render from django.views.generic", "from .forms import StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model = Staff", "Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) |", "| Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return", "Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model =", "| Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView):", "else: return Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model = Staff", 
"from .models import Staff class SearchSearchView(ListView): model = Staff paginate_by = 10 queryset", "| Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class", "if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query)", "django.db.models import Q from django.shortcuts import render from django.views.generic import DetailView, ListView, UpdateView", "Q from django.shortcuts import render from django.views.generic import DetailView, ListView, UpdateView from .forms", ".forms import StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model = Staff paginate_by", "import StaffUpdateForm from .models import Staff class SearchSearchView(ListView): model = Staff paginate_by =", "paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if query:", "Q(supplier_name__icontains=query) | Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query)", "class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model = Staff form_class = StaffUpdateForm", "SuccessMessageMixin from django.db.models import Q from django.shortcuts import render from django.views.generic import DetailView,", "import SuccessMessageMixin from django.db.models import Q from django.shortcuts import render from django.views.generic import", "from django.shortcuts import render from django.views.generic import DetailView, ListView, UpdateView from .forms import", "Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model = Staff class", "model = Staff class StaffUpdateView(UpdateView): 
model = Staff form_class = StaffUpdateForm template_name_suffix =", "= Staff class StaffUpdateView(UpdateView): model = Staff form_class = StaffUpdateForm template_name_suffix = \"_update_form\"", "Q(tags__icontains=query) | Q(email__icontains=query) | Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else:", "model = Staff paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self): query =", "DetailView, ListView, UpdateView from .forms import StaffUpdateForm from .models import Staff class SearchSearchView(ListView):", "import Q from django.shortcuts import render from django.views.generic import DetailView, ListView, UpdateView from", "Q(district__icontains=query) ) else: return Staff.objects.all() class StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model", "import Staff class SearchSearchView(ListView): model = Staff paginate_by = 10 queryset = Staff.objects.all()", "Staff class SearchSearchView(ListView): model = Staff paginate_by = 10 queryset = Staff.objects.all() def", "from django.contrib.messages.views import SuccessMessageMixin from django.db.models import Q from django.shortcuts import render from", "= Staff paginate_by = 10 queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\")", "queryset = Staff.objects.all() def get_queryset(self): query = self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query)", "def get_queryset(self): query = self.request.GET.get(\"q\") if query: return Staff.objects.filter( Q(supplier_name__icontains=query) | Q(tags__icontains=query) |", "import render from django.views.generic import DetailView, ListView, UpdateView from .forms import StaffUpdateForm from", "Q(phone__icontains=query) | Q(description__icontains=query) | Q(address__icontains=query) | Q(district__icontains=query) ) else: return Staff.objects.all() class 
StaffListView(ListView):", "StaffListView(ListView): model = Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model =", "= Staff class StaffDetailView(DetailView): model = Staff class StaffUpdateView(UpdateView): model = Staff form_class" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "flask.abort(403) # initialize the namespace for the execution namespace = {} result =", "sys.stderr.flush() if result and isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200 return", "exec(\"param = \" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun']", "the namespace for the execution namespace = {} result = None try: exec(flask.g,", "KIND, either express or implied. # See the License for the specific language", "# import sys import os import json import subprocess import codecs import traceback", "Unless required by applicable law or agreed to in writing, software # distributed", "import traceback import flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug =", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "return response # start server in a forever loop if __name__ == \"__main__\":", "License. 
# You may obtain a copy of the License at # #", "traceback import flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False", "proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None", "initialize the namespace for the execution namespace = {} result = None try:", "not \"value\" in message: flask.abort(403) value = message[\"value\"] if not isinstance(value, dict): flask.abort(403)", "not return a dictionary\", \"action_output\": result }) response.status_code = 502 return response #", "= namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result,", "main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if", "None try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun = main(param)\",", "law or agreed to in writing, software # distributed under the License is", "flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not message or not", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "= \" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun'] except", "WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g =", "flask.jsonify(result) response.status_code = 200 return response else: response = flask.jsonify({ \"error\": \"the action", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True)", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "did not return a dictionary\", \"action_output\": result }) response.status_code = 502 return response", "the execution namespace = {} result = None try: exec(flask.g, namespace) exec(\"param =", "@proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not message or not isinstance(message,", "you may not use this file except in compliance with the License. #", "code flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run():", "# start server in a forever loop if __name__ == \"__main__\": PORT =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "specific language governing permissions and # limitations under the License. # import sys", "and # limitations under the License. 
# import sys import os import json", "exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result", "namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result =", "json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file =", "namespace) result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result", "flask.jsonify({ \"error\": \"the action did not return a dictionary\", \"action_output\": result }) response.status_code", "not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in message: #", "run(): message = flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict): flask.abort(403) if", "not message or not isinstance(message, dict): flask.abort(403) if not \"value\" in message: flask.abort(403)", "# initialize the namespace for the execution namespace = {} result = None", "+ json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file", "sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response = flask.jsonify(result) response.status_code =", "start server in a forever loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\",", "ANY KIND, either express or implied. 
# See the License for the specific", "codecs import traceback import flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug", "flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST'])", "traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response = flask.jsonify(result)", "message: # store the code flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403)", "from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "action did not return a dictionary\", \"action_output\": result }) response.status_code = 502 return", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "__name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server = WSGIServer(('', PORT), proxy, log=None)", "use this file except in compliance with the License. # You may obtain", "sys import os import json import subprocess import codecs import traceback import flask", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "result }) response.status_code = 502 return response # start server in a forever", "if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server = WSGIServer(('', PORT), proxy,", "not use this file except in compliance with the License. # You may", "# # Copyright 2015-2016 IBM Corporation # # Licensed under the Apache License,", "flask.abort(403) value = message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize the namespace", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in message: # store", "See the License for the specific language governing permissions and # limitations under", "for the execution namespace = {} result = None try: exec(flask.g, namespace) exec(\"param", "\"code\" in message: # store the code flask.g = message[\"code\"] return ('OK', 200)", "\" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun'] except Exception:", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "payload.get(\"value\", {}) if \"code\" in message: # store the code flask.g = message[\"code\"]", "namespace = {} result = None try: exec(flask.g, namespace) exec(\"param = \" +", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "# store the code flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\",", "\"action_output\": result }) response.status_code = 502 return response # start server in a", "Corporation # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "store the code flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST'])", "@proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if not payload", "response # start server in a forever loop if __name__ == \"__main__\": PORT", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import json import subprocess import codecs import traceback import flask from gevent.wsgi import", "subprocess import codecs import traceback import flask from gevent.wsgi import WSGIServer proxy =", "or not isinstance(message, dict): flask.abort(403) if not \"value\" in message: flask.abort(403) value =", "a copy of the 
License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "dict): flask.abort(403) if not \"value\" in message: flask.abort(403) value = message[\"value\"] if not", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload =", "dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in message: # store the", "OF ANY KIND, either express or implied. # See the License for the", "response.status_code = 200 return response else: response = flask.jsonify({ \"error\": \"the action did", "forever loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server = WSGIServer(('',", "2.0 (the \"License\"); # you may not use this file except in compliance", "try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun = main(param)\", namespace)", "a forever loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server =", "= message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message =", "server in a forever loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080))", "# you may not use this file except in compliance with the License.", "methods=['POST']) def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if not payload or", "result = None try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun", "flask.abort(403) if not \"value\" in message: flask.abort(403) value = message[\"value\"] if not isinstance(value,", "json import subprocess import codecs import traceback import flask from gevent.wsgi import WSGIServer", "payload = 
flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict): flask.abort(403) message =", "dict): response = flask.jsonify(result) response.status_code = 200 return response else: response = flask.jsonify({", "\"error\": \"the action did not return a dictionary\", \"action_output\": result }) response.status_code =", "import flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\",", "for the specific language governing permissions and # limitations under the License. #", "2015-2016 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the", "agreed to in writing, software # distributed under the License is distributed on", "return a dictionary\", \"action_output\": result }) response.status_code = 502 return response # start", "= flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload", "502 return response # start server in a forever loop if __name__ ==", "the specific language governing permissions and # limitations under the License. # import", "and isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200 return response else: response", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "IBM Corporation # # Licensed under the Apache License, Version 2.0 (the \"License\");", "isinstance(message, dict): flask.abort(403) if not \"value\" in message: flask.abort(403) value = message[\"value\"] if", "else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not message or", "value = message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize the namespace for", "(the \"License\"); # you may not use this file except in compliance with", "namespace) exec(\"fun = main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr)", "in a forever loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server", "sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200", "response else: response = flask.jsonify({ \"error\": \"the action did not return a dictionary\",", "None payload = flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict): flask.abort(403) message", "False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if not", "# # Unless required by applicable law or agreed to in writing, software", "language governing permissions and # limitations under the License. # import sys import", "License. 
# import sys import os import json import subprocess import codecs import", "= None payload = flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict): flask.abort(403)", "200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not message", "isinstance(value, dict): flask.abort(403) # initialize the namespace for the execution namespace = {}", "== \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server = WSGIServer(('', PORT), proxy, log=None) server.serve_forever()", "express or implied. # See the License for the specific language governing permissions", "loop if __name__ == \"__main__\": PORT = int(os.getenv(\"FLASK_PROXY_PORT\", 8080)) server = WSGIServer(('', PORT),", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "response = flask.jsonify({ \"error\": \"the action did not return a dictionary\", \"action_output\": result", "except in compliance with the License. 
# You may obtain a copy of", "if \"code\" in message: # store the code flask.g = message[\"code\"] return ('OK',", "return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if", "by applicable law or agreed to in writing, software # distributed under the", "in message: # store the code flask.g = message[\"code\"] return ('OK', 200) else:", "execution namespace = {} result = None try: exec(flask.g, namespace) exec(\"param = \"", "if not payload or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if", "os import json import subprocess import codecs import traceback import flask from gevent.wsgi", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "message: flask.abort(403) value = message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize the", "message or not isinstance(message, dict): flask.abort(403) if not \"value\" in message: flask.abort(403) value", "else: response = flask.jsonify({ \"error\": \"the action did not return a dictionary\", \"action_output\":", "either express or implied. # See the License for the specific language governing", "except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response", "return response else: response = flask.jsonify({ \"error\": \"the action did not return a", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "gevent.wsgi import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init():", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "message = flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict): flask.abort(403) if not", "flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict): flask.abort(403) if not \"value\" in", "= message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize the namespace for the", "governing permissions and # limitations under the License. # import sys import os", "# Copyright 2015-2016 IBM Corporation # # Licensed under the Apache License, Version", "the License. # import sys import os import json import subprocess import codecs", "flask.g = None payload = flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "permissions and # limitations under the License. 
# import sys import os import", "import sys import os import json import subprocess import codecs import traceback import", "flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {})", "{}) if \"code\" in message: # store the code flask.g = message[\"code\"] return", "\"value\" in message: flask.abort(403) value = message[\"value\"] if not isinstance(value, dict): flask.abort(403) #", "{} result = None try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace)", "= flask.jsonify({ \"error\": \"the action did not return a dictionary\", \"action_output\": result })", "= main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush()", "}) response.status_code = 502 return response # start server in a forever loop", "file except in compliance with the License. # You may obtain a copy", "Copyright 2015-2016 IBM Corporation # # Licensed under the Apache License, Version 2.0", "= flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict): flask.abort(403) if not \"value\"", "= False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if", "= 200 return response else: response = flask.jsonify({ \"error\": \"the action did not", "= flask.jsonify(result) response.status_code = 200 return response else: response = flask.jsonify({ \"error\": \"the", "('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not", "flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in message: # store the code", "import os import json import subprocess import codecs import traceback import flask from", "# limitations under the License. 
# import sys import os import json import", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200 return response else: response =", "License for the specific language governing permissions and # limitations under the License.", "import subprocess import codecs import traceback import flask from gevent.wsgi import WSGIServer proxy", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "\"the action did not return a dictionary\", \"action_output\": result }) response.status_code = 502", "import codecs import traceback import flask from gevent.wsgi import WSGIServer proxy = flask.Flask(__name__)", "not isinstance(value, dict): flask.abort(403) # initialize the namespace for the execution namespace =", "the License. # You may obtain a copy of the License at #", "import WSGIServer proxy = flask.Flask(__name__) proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g", "to in writing, software # distributed under the License is distributed on an", "dictionary\", \"action_output\": result }) response.status_code = 502 return response # start server in", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict):", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in message:", "payload or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\" in", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= payload.get(\"value\", {}) if \"code\" in message: # store the code flask.g =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= 502 return response # start server in a forever loop if __name__", "required by applicable law or agreed to in writing, software # distributed under", "def run(): message = flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict): flask.abort(403)", "response.status_code = 502 return response # start server in a forever loop if", "result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and", "a dictionary\", \"action_output\": result }) response.status_code = 502 return response # start server", "def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if not payload or not", "applicable law or agreed to in writing, software # distributed under the License", "limitations under the License. 
# import sys import os import json import subprocess", "result and isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200 return response else:", "flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def run(): message", "= flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\",", "if not message or not isinstance(message, dict): flask.abort(403) if not \"value\" in message:", "in message: flask.abort(403) value = message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize", "proxy.debug = False @proxy.route(\"/init\", methods=['POST']) def init(): flask.g = None payload = flask.request.get_json(force=True,silent=True)", "response = flask.jsonify(result) response.status_code = 200 return response else: response = flask.jsonify({ \"error\":", "or agreed to in writing, software # distributed under the License is distributed", "message = payload.get(\"value\", {}) if \"code\" in message: # store the code flask.g", "or implied. # See the License for the specific language governing permissions and", "if not isinstance(value, dict): flask.abort(403) # initialize the namespace for the execution namespace", "methods=['POST']) def run(): message = flask.request.get_json(force=True,silent=True) if not message or not isinstance(message, dict):", "= sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response = flask.jsonify(result) response.status_code", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "if not \"value\" in message: flask.abort(403) value = message[\"value\"] if not isinstance(value, dict):", "namespace for the execution namespace = {} result = None try: exec(flask.g, namespace)", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "= {} result = None try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value),", "message[\"value\"] if not isinstance(value, dict): flask.abort(403) # initialize the namespace for the execution", "200 return response else: response = flask.jsonify({ \"error\": \"the action did not return", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "not payload or not isinstance(payload, dict): flask.abort(403) message = payload.get(\"value\", {}) if \"code\"", "dict): flask.abort(403) # initialize the namespace for the execution namespace = {} result", "init(): flask.g = None payload = flask.request.get_json(force=True,silent=True) if not payload or not isinstance(payload,", "Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush() sys.stderr.flush() if result and isinstance(result, dict): response =", "with the License. # You may obtain a copy of the License at", "= None try: exec(flask.g, namespace) exec(\"param = \" + json.dumps(value), namespace) exec(\"fun =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "exec(\"fun = main(param)\", namespace) result = namespace['fun'] except Exception: traceback.print_exc(file = sys.stderr) sys.stdout.flush()", "under the License. 
# import sys import os import json import subprocess import", "in writing, software # distributed under the License is distributed on an \"AS", "not isinstance(message, dict): flask.abort(403) if not \"value\" in message: flask.abort(403) value = message[\"value\"]", "if result and isinstance(result, dict): response = flask.jsonify(result) response.status_code = 200 return response", "the code flask.g = message[\"code\"] return ('OK', 200) else: flask.abort(403) @proxy.route(\"/run\", methods=['POST']) def", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "return font def render(font, char, size=(128, 128), pad=20): width, height = font.getsize(char) max_size", "font.getsize(char) max_size = max(width, height) if width < height: start_w = (height -", "_moveTo(self, pt): pass def _lineTo(self, pt): self.is_space = False raise StopDraw def _curveToOne(self,", "img.mean() == 255.: # pass is_space = is_space_char(char, ttFont) if is_space: pass else:", "return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def render(font,", "gs = ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g = gs[gname] pen", "StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self,", "StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile):", "uni = ord(char) gname = cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try:", "size=(128, 128), pad=20): width, height = font.getsize(char) max_size = max(width, height) if width", "max(width, height) if width < height: start_w = (height - width) // 2", "= False raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space = False raise", "= ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g = gs[gname] pen =", "(c) 2021-present NAVER Corp. MIT license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen", "for char in defined_chars: # img = np.array(render(ttf, char)) # if img.mean() ==", "pad=20): width, height = font.getsize(char) max_size = max(width, height) if width < height:", "2 + pad start_h = pad else: start_w = pad start_h = (width", "height = font.getsize(char) max_size = max(width, height) if width < height: start_w =", "import BasePen from PIL import Image, ImageFont, ImageDraw import numpy as np class", "2021-present NAVER Corp. 
MIT license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import", "def _lineTo(self, pt): self.is_space = False raise StopDraw def _curveToOne(self, pt1, pt2, pt3):", "# ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath)", "width < height: start_w = (height - width) // 2 + pad start_h", "get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars", "False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for y", "BasePen from PIL import Image, ImageFont, ImageDraw import numpy as np class StopDraw(Exception):", "= TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char,", "raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for y in", "chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap", "- height) // 2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw", "g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars", "= TTFont(fontpath) for char in defined_chars: # img = np.array(render(ttf, char)) # if", "True def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space = False raise StopDraw", "SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass def", "NAVER Corp. 
MIT license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen", "Image, ImageFont, ImageDraw import numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def", "y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs =", "\"\"\" MX-Font Copyright (c) 2021-present NAVER Corp. MIT license \"\"\" from fontTools.ttLib import", "from PIL import Image, ImageFont, ImageDraw import numpy as np class StopDraw(Exception): pass", "is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile),", "= cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass", "= SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf", "pt): self.is_space = False raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space =", "= [] ttFont = TTFont(fontpath) for char in defined_chars: # img = np.array(render(ttf,", "'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def", "size=size) return font def render(font, char, size=(128, 128), pad=20): width, height = font.getsize(char)", "Copyright (c) 2021-present NAVER Corp. 
MIT license \"\"\" from fontTools.ttLib import TTFont from", "draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img = img.resize(size, 2) return img", "try: g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath)", "pass def _lineTo(self, pt): self.is_space = False raise StopDraw def _curveToOne(self, pt1, pt2,", "start_w = pad start_h = (width - height) // 2 + pad img", "= np.array(render(ttf, char)) # if img.mean() == 255.: # pass is_space = is_space_char(char,", "= True def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space = False raise", "gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath):", "width) // 2 + pad start_h = pad else: start_w = pad start_h", "in defined_chars: # img = np.array(render(ttf, char)) # if img.mean() == 255.: #", "license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from PIL import", "font = ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128, 128), pad=20): width,", "def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space = False raise StopDraw def", "return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni =", "in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet()", "= False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for", "else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return", "except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars =", "start_w = (height - width) // 2 + pad start_h = pad else:", "# img = 
np.array(render(ttf, char)) # if img.mean() == 255.: # pass is_space", "class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass", "= pad else: start_w = pad start_h = (width - height) // 2", "else: start_w = pad start_h = (width - height) // 2 + pad", "ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for", "ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g = gs[gname]", "avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font", "import Image, ImageFont, ImageDraw import numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen):", "font def render(font, char, size=(128, 128), pad=20): width, height = font.getsize(char) max_size =", "MIT license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from PIL", "height) if width < height: start_w = (height - width) // 2 +", "= pad start_h = (width - height) // 2 + pad img =", "gname = cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw:", "= is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile,", "ttFont = TTFont(fontpath) for char in defined_chars: # img = np.array(render(ttf, char)) #", "ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128, 128), pad=20): width, height =", "Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img =", "render(font, char, size=(128, 128), pad=20): width, height = font.getsize(char) max_size = max(width, height)", "is_space = is_space_char(char, ttFont) if is_space: 
pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def", "(height - width) // 2 + pad start_h = pad else: start_w =", "fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from PIL import Image, ImageFont, ImageDraw", "pad start_h = (width - height) // 2 + pad img = Image.new(\"L\",", "+ pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h),", "return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars =", "chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char)", "<filename>datasets/ttf_utils.py<gh_stars>0 \"\"\" MX-Font Copyright (c) 2021-present NAVER Corp. MIT license \"\"\" from fontTools.ttLib", "pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): #", "as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space =", "height) // 2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw =", "(width - height) // 2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255)", "size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128, 128), pad=20):", "ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font", "ord(char) gname = cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except", "< height: start_w = (height - width) // 2 + pad start_h =", "np.array(render(ttf, char)) # if img.mean() == 255.: # pass is_space = is_space_char(char, ttFont)", "pad else: start_w = pad start_h = (width - height) // 2 +", "get_filtered_chars(fontpath): # 
ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont =", "255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img = img.resize(size, 2) return", "SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf =", "- width) // 2 + pad start_h = pad else: start_w = pad", "from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from PIL import Image, ImageFont,", "is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150):", "(max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img = img.resize(size,", "= (height - width) // 2 + pad start_h = pad else: start_w", "if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font =", "pt): pass def _lineTo(self, pt): self.is_space = False raise StopDraw def _curveToOne(self, pt1,", "# pass is_space = is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return", "max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img = img.resize(size, 2)", "defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for char in defined_chars:", "= max(width, height) if width < height: start_w = (height - width) //", "ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni", "import numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet)", "import TTFont from fontTools.pens.basePen import BasePen from PIL import 
Image, ImageFont, ImageDraw import", "= ord(char) gname = cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen)", "PIL import Image, ImageFont, ImageDraw import numpy as np class StopDraw(Exception): pass class", "pass return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars", "def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return", "= gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space def", "== 255.: # pass is_space = is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16',", "if width < height: start_w = (height - width) // 2 + pad", "char, size=(128, 128), pad=20): width, height = font.getsize(char) max_size = max(width, height) if", "= [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap =", "is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname =", "char)) # if img.mean() == 255.: # pass is_space = is_space_char(char, ttFont) if", "pass is_space = is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars", "__init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass def _lineTo(self, pt):", "img = np.array(render(ttf, char)) # if img.mean() == 255.: # pass is_space =", "= (width - height) // 2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)),", "img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font)", "def render(font, char, size=(128, 128), pad=20): width, height = font.getsize(char) max_size = max(width,", "raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space = False raise 
StopDraw def", "_lineTo(self, pt): self.is_space = False raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space", "// 2 + pad start_h = pad else: start_w = pad start_h =", "class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def", "StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()]", "avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def render(font, char,", "from fontTools.pens.basePen import BasePen from PIL import Image, ImageFont, ImageDraw import numpy as", "cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return", "// 2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img)", "\"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from PIL import Image,", "ImageDraw import numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None):", "ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni]", "TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont):", "pt1, pt2, pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile)", "max_size = max(width, height) if width < height: start_w = (height - width)", "ttf = TTFont(fontfile) chars = [chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def", "avail_chars = [] ttFont = TTFont(fontpath) for char in defined_chars: # img =", "if img.mean() == 255.: # pass is_space = is_space_char(char, ttFont) if is_space: pass", "np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True", 
"255.: # pass is_space = is_space_char(char, ttFont) if is_space: pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16'))", "pad start_h = pad else: start_w = pad start_h = (width - height)", "def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname", "read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for char in", "char in defined_chars: # img = np.array(render(ttf, char)) # if img.mean() == 255.:", "TTFont from fontTools.pens.basePen import BasePen from PIL import Image, ImageFont, ImageDraw import numpy", "# if img.mean() == 255.: # pass is_space = is_space_char(char, ttFont) if is_space:", "defined_chars: # img = np.array(render(ttf, char)) # if img.mean() == 255.: # pass", "def _curveToOne(self, pt1, pt2, pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf", "glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space", "width, height = font.getsize(char) max_size = max(width, height) if width < height: start_w", "pt2, pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars", "pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = []", "fontTools.pens.basePen import BasePen from PIL import Image, ImageFont, ImageDraw import numpy as np", "start_h = pad else: start_w = pad start_h = (width - height) //", "g = gs[gname] pen = SpaceOrNotPen(gs) try: g.draw(pen) except StopDraw: pass return pen.is_space", "pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars =", "self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf = TTFont(fontfile) chars = [chr(y)", "start_h = (width - height) // 2 + pad img = Image.new(\"L\", (max_size+(pad*2),", 
"StopDraw: pass return pen.is_space def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath)", "ImageFont, ImageDraw import numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self,", "= ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g =", "2 + pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w,", "cmap = ttFont.getBestCmap() gs = ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g", "self.is_space = False raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space = False", "False raise StopDraw def _curveToOne(self, pt1, pt2, pt3): self.is_space = False raise StopDraw", "Corp. MIT license \"\"\" from fontTools.ttLib import TTFont from fontTools.pens.basePen import BasePen from", "ttFont.getGlyphSet() uni = ord(char) gname = cmap[uni] g = gs[gname] pen = SpaceOrNotPen(gs)", "def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128,", "MX-Font Copyright (c) 2021-present NAVER Corp. 
MIT license \"\"\" from fontTools.ttLib import TTFont", "[chr(y) for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap()", "TTFont(fontpath) for char in defined_chars: # img = np.array(render(ttf, char)) # if img.mean()", "_curveToOne(self, pt1, pt2, pt3): self.is_space = False raise StopDraw def get_defined_chars(fontfile): ttf =", "pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt):", "pass else: avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16')) return avail_chars def read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size)", "def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass def _lineTo(self,", "= font.getsize(char) max_size = max(width, height) if width < height: start_w = (height", "+ pad start_h = pad else: start_w = pad start_h = (width -", "def get_filtered_chars(fontpath): # ttf = read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont", "super().__init__(glyphSet) self.is_space = True def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space =", "get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for char in defined_chars: # img", "= get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for char in defined_chars: #", "height: start_w = (height - width) // 2 + pad start_h = pad", "pad img = Image.new(\"L\", (max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char,", "[] ttFont = TTFont(fontpath) for char in defined_chars: # img = np.array(render(ttf, char))", "self.is_space = True def _moveTo(self, pt): pass def _lineTo(self, pt): self.is_space = False", "128), pad=20): width, height = font.getsize(char) max_size = max(width, height) if width <", "= Image.new(\"L\", 
(max_size+(pad*2), max_size+(pad*2)), 255) draw = ImageDraw.Draw(img) draw.text((start_w, start_h), char, font=font) img", "numpy as np class StopDraw(Exception): pass class SpaceOrNotPen(BasePen): def __init__(self, glyphSet=None): super().__init__(glyphSet) self.is_space", "= ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128, 128), pad=20): width, height", "= read_font(fontpath) defined_chars = get_defined_chars(fontpath) avail_chars = [] ttFont = TTFont(fontpath) for char", "read_font(fontfile, size=150): font = ImageFont.truetype(str(fontfile), size=size) return font def render(font, char, size=(128, 128),", "for y in ttf[\"cmap\"].tables[0].cmap.keys()] return chars def is_space_char(char, ttFont): cmap = ttFont.getBestCmap() gs" ]
[ "H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 /", "th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda =", "vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train()", "self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail[0](res)", "x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x", "if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except", "* 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch,", "common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ]", "64, kernel_size=9, padding=4), act ] # define body module m_body = [ common.ResBlock(", "<= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0)", "nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x = self.head(x) res", "= torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1,", "numpy as np import torch.nn.functional as F import random import math def make_model(args,", "torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H)", "x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() 
for", "] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x,", "= mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent", "self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W =", "spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4: #", "' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict:", "= [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define body module m_body", "the parameter named {}, ' 'whose dimensions in the model are {} and", "1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H,", "= H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad()", "3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda", "random import math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args,", "num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1,", "module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ]", "[ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks)", "/ 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, 
spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864)", "padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self,", "= args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) #", "nn import torch from torch.autograd import Variable import numpy.random as npr import numpy", "vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector =", "x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items():", "common.MeanShift(args.rgb_range, sign=1) # define head module m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4),", "torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean >", "x * mask_all x = self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self,", "64 kernel_size = 3 scale = args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range)", "-1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the", "----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:,", "res += x x = self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new", "num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x =", "+ from model import common import torch.nn as nn import torch from torch.autograd", "= torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all 
= vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:]", ".format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key \"{}\"", "# -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value =", "self).__init__() n_resblocks = 5 n_feats = 64 kernel_size = 3 scale = args.scale[0]", "def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if", "m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats,", "spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1,", "torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] =", "'whose dimensions in the checkpoint are {}.' 
.format(name, own_state[name].size(), param.size())) elif strict: if", "---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean,", "1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all x =", "param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the", "= self.body(x) res += x x = self.tail[0](res) if flag: self.eval() x_new =", "in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail =", "grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <=", "> vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1", "torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: # --------------------------", "mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all", "define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale", "mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all x = self.tail[-1](x) x", "name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param =", "= criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1),", "np import torch.nn.functional as F import random import math def make_model(args, parent=False): return", "= torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), 
torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H,", "math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet,", "common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body", "sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val", "H).view(num_batch, 1, H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel *", "choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 /", "own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if", "x = self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state", "padding=4), act ] # define body module m_body = [ common.ResBlock( conv, n_feats,", "H, H).view(num_batch, 1, H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel", "import numpy as np import torch.nn.functional as F import random import math def", "m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head", "vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1)", "= self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param,", "] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [ common.Upsampler(conv,", "1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, 
descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch,", "x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr,", "the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1:", "model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name,", "* grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one", "param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data", "loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean", "parameter named {}, ' 'whose dimensions in the model are {} and '", "kernel_size=9, padding=4), act ] # define body module m_body = [ common.ResBlock( conv,", "vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch,", "SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64", "H, W = x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss()", "in the checkpoint are {}.' 
.format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') ==", "channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1,", "__init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64 kernel_size =", "dimensions in the model are {} and ' 'whose dimensions in the checkpoint", "[ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define body module m_body =", "self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False,", "super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64 kernel_size = 3 scale =", "range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [", "grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch,", "self.head(x) res = self.body(x) res += x x = self.tail[0](res) if flag: self.eval()", "torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all =", "= [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head =", "RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are", "torch.nn.functional as F import random import math def make_model(args, parent=False): return SRResNet(args) class", "requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW = H*W sr = self.tail[-1](x_new)", "param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While", "isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') 
== -1:", "3 scale = args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range,", "= th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all =", "kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def", "numpy.random as npr import numpy as np import torch.nn.functional as F import random", "-1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean =", "def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__()", "for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param", "= [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in", "] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True,", "self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach()", "num_channel, H, W = x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion =", "grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean", "= nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x = self.sub_mean(x)", "torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: # -------------------------- channel", "dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) 
mask_all_cuda = torch.where(spatial_mean > th18_mask_value,", "x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape", "named {}, ' 'whose dimensions in the model are {} and ' 'whose", "= self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name,", "and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif", "bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats))", "dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail')", "1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x *", "= x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW", "dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value,", "x x = self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data,", "param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key \"{}\" in state_dict'", ") for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail", "model import common import torch.nn as nn import torch from torch.autograd import Variable", "th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: #", "name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception:", "= 
vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True)", "as F import random import math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module):", "n_feats = 64 kernel_size = 3 scale = args.scale[0] act = nn.PReLU() self.sub_mean", "n_resblocks = 5 n_feats = 64 kernel_size = 3 scale = args.scale[0] act", "own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key \"{}\" in", "= torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean", "num_batch, num_channel, H, W = x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion", "5 n_feats = 64 kernel_size = 3 scale = args.scale[0] act = nn.PReLU()", "loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel,", "tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4)", "are {} and ' 'whose dimensions in the checkpoint are {}.' 
.format(name, own_state[name].size(),", "self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num", "criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2)", "= common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module m_head = [", "act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head", "nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise", "import torch from torch.autograd import Variable import numpy.random as npr import numpy as", "return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5", "'whose dimensions in the model are {} and ' 'whose dimensions in the", "act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) #", "self.train() mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all x = self.tail[-1](x)", "= random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW", "head module m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define", "x = self.head(x) res = self.body(x) res += x x = self.tail[0](res) if", "x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW = H*W", "def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64 kernel_size", "strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state:", 
"hr=None): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x", "* 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch,", "own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named", "= self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch,", "x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch,", "mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1,", "from torch.autograd import Variable import numpy.random as npr import numpy as np import", "= math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value", "conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats,", "mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all,", "kernel_size = 3 scale = args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean", "try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter", "th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch,", "self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): 
own_state = self.state_dict() for name, param", "x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW =", "act ] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size,", "args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64 kernel_size = 3", "if flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H,", "if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose", "make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks", "F import random import math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def", "choose_one = random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num =", "except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {},", "torch from torch.autograd import Variable import numpy.random as npr import numpy as np", "mask_all x = self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True):", "as npr import numpy as np import torch.nn.functional as F import random import", "math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value =", "self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x =", "grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1)", "1, H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1", "own_state: if isinstance(param, 
nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail')", "torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if", "3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail)", "# ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value =", "= Variable(mask_all, requires_grad=True) x = x * mask_all x = self.tail[-1](x) x =", "torch.autograd import Variable import numpy.random as npr import numpy as np import torch.nn.functional", "num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean", "= nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x = self.head(x)", "nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail =", "_ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail", "# define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3,", "H) else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2)", "criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean", "name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions", "else: # -------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value", "class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, 
self).__init__() n_resblocks = 5 n_feats =", "as nn import torch from torch.autograd import Variable import numpy.random as npr import", "module m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for", "act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail", "= self.head(x) res = self.body(x) res += x x = self.tail[0](res) if flag:", "import common import torch.nn as nn import torch from torch.autograd import Variable import", "the model are {} and ' 'whose dimensions in the checkpoint are {}.'", "---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:,", "= nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean =", "hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean", "# define head module m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ]", "/ 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel)", "mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: # -------------------------- channel ---------------------------- vector_thresh_percent =", "x = self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda()", "4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value", "state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param)", "descending=True)[0][:, 
spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(),", "elif strict: if name.find('tail') == -1: raise KeyError('unexpected key \"{}\" in state_dict' .format(name))", "W = x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss", "== -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in", "1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4:", "n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats,", "torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean >", "kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size))", "res_scale=args.res_scale ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define", "= self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x =", "= spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4: # ----------------------------", "= self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state =", "in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try:", "1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel,", "= common.MeanShift(args.rgb_range, sign=1) # define head module m_head = [ nn.Conv2d(3, 
64, kernel_size=9,", "kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'),", "are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected", "nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x", "scale = args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1)", "= 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x * mask_all x", "1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad()", "self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict()", "from model import common import torch.nn as nn import torch from torch.autograd import", "vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all", "import torch.nn.functional as F import random import math def make_model(args, parent=False): return SRResNet(args)", "{} and ' 'whose dimensions in the checkpoint are {}.' 
.format(name, own_state[name].size(), param.size()))", "flag=False, hr=None): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res +=", "torch.nn as nn import torch from torch.autograd import Variable import numpy.random as npr", "= nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module", "common import torch.nn as nn import torch from torch.autograd import Variable import numpy.random", "= Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW = H*W sr", "= vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all =", "nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define body module m_body = [", "npr import numpy as np import torch.nn.functional as F import random import math", "-------------------------- channel ---------------------------- vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean,", "36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch,", "{}, ' 'whose dimensions in the model are {} and ' 'whose dimensions", "conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats = 64 kernel_size = 3 scale", "m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale ) for _", "scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body =", "self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel,", "Variable import numpy.random as npr import numpy as np 
import torch.nn.functional as F", "= grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean =", "body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale )", "in the model are {} and ' 'whose dimensions in the checkpoint are", "SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks = 5 n_feats", "= math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value", "' 'whose dimensions in the model are {} and ' 'whose dimensions in", "# + from model import common import torch.nn as nn import torch from", "return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in", "Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, '", "{}.' 
.format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key", "args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define", "HW = H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr)", "as np import torch.nn.functional as F import random import math def make_model(args, parent=False):", "import random import math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self,", "define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9,", "HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial -----------------------", "vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x", "nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x", "= torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9)", "module m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define body", "torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda()) mask_all = vector.view(num_batch, num_channel, 1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all", "m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [ common.Upsampler(conv, scale,", "spatial_mean.view(num_batch, HW) self.zero_grad() choose_one = random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial", "mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else: # -------------------------- channel 
----------------------------", "flag: self.eval() x_new = x.clone().detach() x_new = Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W", "= 3 scale = args.scale[0] act = nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean =", "in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if", "spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one =", "def forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x = self.head(x) res =", "x, flag=False, hr=None): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res", "vector_thresh_percent = math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent]", "common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module m_head = [ nn.Conv2d(3,", "> th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H) else:", "res = self.body(x) res += x x = self.tail[0](res) if flag: self.eval() x_new", "nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch,", "= x_new.shape HW = H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss =", "spatial_drop_num = math.ceil(HW * 1 / 3.0) th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num]", "m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act ] # define body module", "random.randint(0,9) if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW *", "= x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), 
dim=2) channel_mean = grad_channel_mean grad_channel_mean =", "grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel,", "[ common.Upsampler(conv, scale, n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head)", "= x * mask_all x = self.tail[-1](x) x = self.add_mean(x) return x def", "import numpy.random as npr import numpy as np import torch.nn.functional as F import", "self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None): x =", "raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model", "self.zero_grad() loss.backward() grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean =", "checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise", "load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name", "if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') ==", "vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(), torch.ones(channel_mean.shape).cuda())", "self.body(x) res += x x = self.tail[0](res) if flag: self.eval() x_new = x.clone().detach()", "Variable(x_new.data, requires_grad=True).cuda() num_batch, num_channel, H, W = x_new.shape HW = H*W sr =", "n_feats, act='prelu'), nn.Conv2d(n_feats, 3, kernel_size=9, padding=4) ] self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body)", "define head module m_head = [ nn.Conv2d(3, 64, kernel_size=9, 
padding=4), act ] #", "spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda())", "import math def make_model(args, parent=False): return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv):", "if choose_one <= 4: # ---------------------------- spatial ----------------------- spatial_drop_num = math.ceil(HW * 1", "# define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, bn=True, act=act,", "= grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean,", "grads_val = x_new.grad.clone().detach() grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean", "1, 1) mask_all[int(num_batch/3):,:,:,:] = 1 self.train() mask_all = Variable(mask_all, requires_grad=True) x = x", "for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module", "= self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward() grads_val =", "= torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num] th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = torch.where(spatial_mean", "= nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x, flag=False, hr=None):", "= 64 kernel_size = 3 scale = args.scale[0] act = nn.PReLU() self.sub_mean =", "+= x x = self.tail[0](res) if flag: self.eval() x_new = x.clone().detach() x_new =", "channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new *", "parent=False): 
return SRResNet(args) class SRResNet(nn.Module): def __init__(self, args, conv=common.default_conv): super(SRResNet, self).__init__() n_resblocks =", "import Variable import numpy.random as npr import numpy as np import torch.nn.functional as", "sign=1) # define head module m_head = [ nn.Conv2d(3, 64, kernel_size=9, padding=4), act", "H*W sr = self.tail[-1](x_new) criterion = nn.L1Loss() loss = criterion(sr, hr) self.zero_grad() loss.backward()", "self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module m_head = [ nn.Conv2d(3, 64,", "Variable(mask_all, requires_grad=True) x = x * mask_all x = self.tail[-1](x) x = self.add_mean(x)", "1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch,", "import torch.nn as nn import torch from torch.autograd import Variable import numpy.random as", "self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter):", "num_channel, 1, 1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW)", "state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in", "dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1) spatial_mean = torch.sum(x_new", "= param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying", "x = x * mask_all x = self.tail[-1](x) x = self.add_mean(x) return x", "n_feats, kernel_size)) m_body.append(nn.BatchNorm2d(n_feats)) # define tail module m_tail = [ common.Upsampler(conv, scale, n_feats,", "* mask_all x = self.tail[-1](x) x = self.add_mean(x) return x def load_state_dict(self, state_dict,", "th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864) mask_all_cuda = 
torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(), torch.ones(spatial_mean.shape).cuda()) mask_all", "3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector", "requires_grad=True) x = x * mask_all x = self.tail[-1](x) x = self.add_mean(x) return", "nn.PReLU() self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module m_head", "self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) # define head module m_head =", "torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2) channel_mean = grad_channel_mean grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1)", "math.ceil(num_channel * 1 / 3.2) vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent] vector_thresh_value =", "forward(self, x, flag=False, hr=None): x = self.sub_mean(x) x = self.head(x) res = self.body(x)", "1) spatial_mean = torch.sum(x_new * grad_channel_mean, 1) spatial_mean = spatial_mean.view(num_batch, HW) self.zero_grad() choose_one", "descending=True)[0][:, vector_thresh_percent] vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel) vector = torch.where(channel_mean > vector_thresh_value, torch.zeros(channel_mean.shape).cuda(),", "copying the parameter named {}, ' 'whose dimensions in the model are {}", "= 5 n_feats = 64 kernel_size = 3 scale = args.scale[0] act =" ]
[ "na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis gehuurd voor €{huur}", "Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 #", "\"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ] return table def fill_area(x: pd.Series,", "pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar -", "with get_reusable_executor() as executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ),", "results = {} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def", "f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. \" f\"Op {date.date()} (na", "df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000 print(", "= 2): return huis_waarde * onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen:", "huidige_prijs for _date in dates[::-1]: # We rekenen terug naar de prijs van", "1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000", "per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame):", "1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren =", "0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf(", "vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 
1000).plot.contourf( ax=axs[0,", "stijging/daling per jaar (%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", color=\"k\", )", "-> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8),", "/ 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3", "📈)\", \"notities\", ] ] return table def fill_area(x: pd.Series, ax, alpha: float =", "\"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14,", "(df.verschil < 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen is het", "= float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten", "float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten - huur afgelost", "y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1,", "-= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar! 
woz_waarde =", "groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ] if len(dates) <", "woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde )", "1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(),", "aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen,", "df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate()", "add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood", "(pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)} jaar\" table", "1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0],", "belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen = inkomen_1 + inkomen_2", "partial from itertools import product from numbers import Number from typing import Any,", "ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. 
beleggen\") plt.show()", "pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs", "groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1", "betaald += hypotheek_kosten belegging += hypotheek_kosten - huur afgelost += float(afbetaling) if date.month", "cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0,", "df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits # **Adjusted", "len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is", "pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100,", "hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs / 12 kosten", "(🏠 - 📈)\", \"notities\", ] ] return table def fill_area(x: pd.Series, ax, alpha:", "{} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters):", "data sinds 1996. 
df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \",", "functools import partial from itertools import product from numbers import Number from typing", "= stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei", "(ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst", "{aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k,", "Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een jaar eerder\"", "== date].iloc[0] / 100 return (1 + pct) ** (1 / 12) def", "jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt =", "Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 *", "f\"{round(dt / 86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte", "plt import numpy as np import pandas as pd import scipy.optimize from loky", "only first of the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative =", "= huidige_prijs for _date in dates[::-1]: # We rekenen terug naar de prijs", "= [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren", "woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon", "dat aandelen beter waren, dan is de verwachte winst 
€{mean_beleggen:.1f}k.\" ) print(f\"Als een", "in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald", "hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number =", "def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500", "for date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei,", "groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek", "== jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax,", "(x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw", "(jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen", "nu\" ), ] for dct in example_periods: mean = lambda x: x[(x.index >=", "] for dct in example_periods: mean = lambda x: x[(x.index >= dct[\"van\"]) &", "verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters:", "fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values", "stock_relative = pd.Series(stock_relative) # Select at same dates as huis prijzen huis_prijsindex =", "onderhoud_pct: float = 2): return huis_waarde * onderhoud_pct / 100 / 12 def", "(value - prev) / prev * 100 stock_relative = pd.Series(stock_relative) # Select at", "df.plot.scatter( ax=ax, x=\"aankoop_datum\", 
y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop", "alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, )", "rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald = 0 afgelost =", "= bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek", "van alle gevallen is het beter om aandelen \" f\"te kopen en in", "f\"te kopen en in {100-pct_rood:.1f}% is het beter om een huis te kopen.\"", "for splits # **Adjusted close price adjusted for both dividends and splits. stock_price", "+ pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame):", "res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return", "pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df", "groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs", "color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1,", "maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: #", "vermogen < 0: return 0 # De rest is in box 3 schijf_1", "stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron:", "met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar! 
woz_waarde = bepaal_woz(huis_waarde, date, groei)", "= float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 *", "def load_sp500() -> pd.Series: # Daily data to need to resample it to", "= df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame) ->", "def vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner: bool = True ):", "date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand van de", "add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels,", "1000 print( f\"In het geval dat aandelen beter waren, dan is de verwachte", "vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar! woz_waarde = bepaal_woz(huis_waarde,", "+= onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] +=", "vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates = groei.index[groei.index.year == vorig_jaar] prijs", "huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T", "= lambda x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\"", "beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds", "Number, geleend: Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number = 2.04,", "stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return", "cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) 
cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in", "2008 crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996", "pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\",", "Number = 1, met_fiscaal_partner: bool = True, verbose: bool = True, ): dates", "een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis gehuurd voor €{huur} per", "1) prev = stock_price[date_prev] stock_relative[date] = (value - prev) / prev * 100", ") def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() +", "op {aankoop_datum}. \" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen)", "= pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return", "= date.year - 1 dates = groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for", "True, ): dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1", "-> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True)", "- 1) prev = stock_price[date_prev] stock_relative[date] = (value - prev) / prev *", "pd.DataFrame, huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30", "12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als \"", "= stock_price[date_prev] stock_relative[date] = (value - prev) / prev * 100 stock_relative =", "= (value - prev) / prev * 100 stock_relative = pd.Series(stock_relative) # Select", "prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( 
[huis_prijsindex, stock_relative], axis=1,", "pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\",", "niet mogelijk want we kunnen niet in de toekomst kijken return with get_reusable_executor()", "index vs. tijd, bron: Yahoo! Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio():", "Krijg hypotheekrenteaftrek terug van vorig jaar! woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek =", "pd.Series: # Daily data to need to resample it to quarterly like the", "def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2,", "1 dates = groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]:", "/ 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald", "duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct)", "+= float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten - huur afgelost += float(afbetaling)", "86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde *", "hypotheekrenteaftrek terug van vorig jaar! 
woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek(", "transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na", "/ 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return", "/ 100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten", "[ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ]", "beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen", "1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0,", "= df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000", "None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per", "date_prev = date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] = (value - prev)", "iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for r", "hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen", "van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met nu\" ), ] for", "ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen", "Daily data to need to resample it to quarterly like the huizenprijzen df_stock", "dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs / 12 kosten =", "0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") 
axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\")", "7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500 index vs.", "hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes =", "stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot(", "-> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1,", "def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number", "= [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ]", "📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] =", ") df = pd.DataFrame([r for r in results if r is not None])", "over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van", ") print(f\"Als een huis kopen beter was, dan is de verwachte winst €{mean_huis:.1f}k.\")", "jaar (%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\", color=\"k\", ) fill_area(groei.aandelen, ax)", "bepaald aan de hand van de prijs van vorig jaar.\"\"\" vorig_jaar = date.year", "label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling", "+ inkomen_2 + inkomen_3 return inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum:", "prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max()", "periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ] return table def", "start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return", "geval dat aandelen beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als", "from itertools import product from numbers import Number from typing import Any, Dict,", "= maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend)", "= maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave", ") -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs))", "= groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]: # We", "int = 30 * 12, jaarinkomen: Number = 90_000, schulden: Number = 20_000,", "ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\",", "each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price =", "price adjusted for both dividends and splits. 
stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create", "belegging = 0 huis_waarde = geleend for date in dates: huis_waarde *= maandelijke_groei(date,", "of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax =", "ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\")", "in box 3 schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen", "import matplotlib.pyplot as plt import numpy as np import pandas as pd import", "get_reusable_executor from tqdm.notebook import tqdm from maandlasten import maandlasten from mortgage import Mortgage,", "next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging", "* onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen: float, schulden: float =", "van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. 
\" f\"Hadden we een", "data to need to resample it to quarterly like the huizenprijzen df_stock =", "dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met nu\" ), ]", "f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een", "cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw is", "maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float:", "ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\",", "huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative],", "de prijs van vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates = groei.index[groei.index.year", "tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met nu\" ), ] for dct", "\"Monte Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for r in results", "return 0 # De rest is in box 3 schijf_1 = 100_000 -", "* 12, jaarinkomen: Number = 90_000, schulden: Number = 20_000, onderhoud_pct: Number =", "= 2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number = 90_000, schulden:", "x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] =", "huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct:", "Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot as plt import numpy as", "vs. tijd, bron: Yahoo! 
Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame:", "title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax", "0: return 0 # De rest is in box 3 schijf_1 = 100_000", "overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen - betaald -", "plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\",", "if date.month == 1 and date.year > start_year: # Betaal vermogensbelasting over vorig", "inkomen_1 + inkomen_2 + inkomen_3 return inkomen * 31 / 100 def koop_huis_of_beleg(", "load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\",", "plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2,", "0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij =", "x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\"", "prijs stijging/daling per jaar (%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\", color=\"k\",", "f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud,", "geleend: Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd:", "per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3)", "), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True,", "float = 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha,", "round(jaar_tot_verkoop * 12) + 1 ] if len(dates) < jaar_tot_verkoop * 12: raise", "in {100-pct_rood:.1f}% is het beter om een huis te kopen.\" ) mean_beleggen =", "afgelost += float(afbetaling) if date.month == 1 and date.year > start_year: # Betaal", "* 5.69 / 100 inkomen = inkomen_1 + inkomen_2 + inkomen_3 return inkomen", "de toekomst kijken return with get_reusable_executor() as executor: results = list( tqdm( executor.map(", "), ] for dct in example_periods: mean = lambda x: x[(x.index >= dct[\"van\"])", "levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels,", "winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\"", "100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -= schulden if vermogen", "datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\")", "fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en", "date.year - 1 dates = groei.index[groei.index.year == 
vorig_jaar] prijs = huidige_prijs for _date", "== vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]: # We rekenen terug", ">= start].items(): date_prev = date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] = (value", "+= hypotheek_kosten - huur afgelost += float(afbetaling) if date.month == 1 and date.year", "kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, )", "ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True)", "/ 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1,", "datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\")", "2): return huis_waarde * onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen: float,", "figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood", "100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50", "0 else \"verlies\" print( f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht.", "from collections import defaultdict from functools import partial from itertools import product from", "vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting", "100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur:", "waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is huis is", "= f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] +=", "groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs 
stijging/daling per jaar (%)\", title=\"S&P500 index vs. tijd,", "ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. tijd, bron: CBS\", figsize=(8, 8),", "12) + 1 ] if len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een", "def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei,", "groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1 results = {} iterator =", "return kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2,", "as np import pandas as pd import scipy.optimize from loky import get_reusable_executor from", "rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten - huur afgelost +=", "= 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 =", "color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" )", "beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd:", "mogelijk als \" f\"we starten op {aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f} is", "Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar", "= (df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df:", "vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\",", "dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen)", "pd.DataFrame([r for r in results if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum)", "df = pd.DataFrame([r for r in results if r is not None]) df.aankoop_datum", "if verbose: winst_of_verlies = \"winst\" if huis_winst > 0 else \"verlies\" print( f\"We", "-= schulden if vermogen < 0: return 0 # De rest is in", "None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig,", "horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0,", "is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, )", "and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each day", "100 * (df.verschil < 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen", "x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0,", ") ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. 
beleggen\")", "het geval dat aandelen beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\" )", "at same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei", "een huis gehuurd voor €{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k.", "vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig", "= df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels", "try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar'", "dan is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was,", "Union import matplotlib import matplotlib.colors import matplotlib.pyplot as plt import numpy as np", "* 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als", "terug naar de prijs van vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return", "voor €{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k. 
\" f\"Dat is", "df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for", "= 1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 =", "x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"]", "need to resample it to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date", "1.90 / 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen", "pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar =", "stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", figsize=(7,", "overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst > 0 else \"verlies\" print(", "de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met nu\"),", "_date in dates[::-1]: # We rekenen terug naar de prijs van vorig jaar", "te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil", "als \" f\"we starten op {aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\"", "\" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1],", "color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp,", "= plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of", "is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was, dan", "-> None: pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil) print( f\"In", "\"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente)", "= df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df", "geleend for date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date,", "1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95,", "(x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() /", "float = 2): return huis_waarde * onderhoud_pct / 100 / 12 def vermogensbelasting(", "= load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo!", "ax, alpha: float = 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values > 0,", "jaar! 
woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde", "stock_relative[date] = (value - prev) / prev * 100 stock_relative = pd.Series(stock_relative) #", "dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van", "import partial from itertools import product from numbers import Number from typing import", ") axs[0, 0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is", "jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, )", "t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number =", "axs[0, 0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is beter\",", "< 0: return 0 # De rest is in box 3 schijf_1 =", "= 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number =", "xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil", "= 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1, ) -> float:", "0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen is het beter om", "huur afgelost += float(afbetaling) if date.month == 1 and date.year > start_year: #", "levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels,", "terug van vorig jaar! 
woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year", "return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil <", "0.02 huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies", "== 1 and date.year > start_year: # Betaal vermogensbelasting over vorig jaar belegging", "dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each", "like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close", "aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ] if len(dates) < jaar_tot_verkoop *", "return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds() /", "= huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting", "plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"),", "axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren", "adjusted for splits # **Adjusted close price adjusted for both dividends and splits.", "fig, ax = plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\",", "huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df()", "vermogen -= schulden if vermogen < 0: return 0 # De rest is", "plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\",", "tijd, bron: Yahoo! 
Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload", "is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop", "- pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)} jaar\" table =", "nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0,", "min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2", "= list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), )", "sinds 1996. df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\")", "verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis gehuurd voor €{huur} per maand", "ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\",", "hypotheek_kosten - huur afgelost += float(afbetaling) if date.month == 1 and date.year >", "fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar", "ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\",", "transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray()", "date].iloc[0] / 100 return (1 + pct) ** (1 / 12) def bepaal_woz(huidige_prijs:", "€{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \"", "mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil > 0].mean() /", "1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number", "df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None:", "2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1, ) -> float: def", "ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente", "100 inkomen = inkomen_1 + inkomen_2 + inkomen_3 return inkomen * 31 /", "parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max()", "t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number = 2.04,", "Any, Dict, Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot as plt import", "return stock_price def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs", "en met nu\" ), ] for dct in example_periods: mean = lambda x:", "= 0 huis_waarde = geleend for date in dates: huis_waarde *= maandelijke_groei(date, groei,", "\"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -=", "f\"In het geval dat aandelen beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\"", ").monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks)", "groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int =", "(jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame)", "\"verschil (🏠 - 📈)\", 
\"notities\", ] ] return table def fill_area(x: pd.Series, ax,", "fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de", "van vorig jaar! woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year -", "title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def", "**Adjusted close price adjusted for both dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float)", "/ 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde", "= belastbaar_3 * 5.69 / 100 inkomen = inkomen_1 + inkomen_2 + inkomen_3", "kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur:", "collections import defaultdict from functools import partial from itertools import product from numbers", "= (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek", "4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3", "\"verlies\" print( f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op", "= geleend for date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *=", "dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat(", "len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(),", ") return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, )", "huis_waarde = geleend for date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging", "s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8,", "\" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis gehuurd", "from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily", "om een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000", "return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar' is", "get_reusable_executor() as executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte", "min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3", "belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000 - 100_000", "bool = True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if", "for both dividends and splits. 
stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points", "fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf(", "def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\",", "df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits # **Adjusted close price adjusted", "f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \"", "gehuurd voor €{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k. \" f\"Dat", "is niet mogelijk als \" f\"we starten op {aankoop_datum}. \" f\"Een duur van", "zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0,", "ax.set_title(\"Winst kopen huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente:", "0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha,", "in de toekomst kijken return with get_reusable_executor() as executor: results = list( tqdm(", "else \"verlies\" print( f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. \"", "= load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ] # Keep only first", "= {} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar,", "len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen is het beter om aandelen \"", "xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\", figsize=(7, 7),", "0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date:", "mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis)", "inkomen_2 + inkomen_3 return inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str,", "t.o.v. een jaar eerder\" # met alle kwartaal data sinds 1996. df =", "- 1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon", "date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float: pct", "1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd,", "jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis,", "ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, )", "= list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, )", "horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds =", "** (1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde", "f\"In {pct_blauw:.1f}% van alle gevallen is het beter om aandelen \" f\"te kopen", "stock_price.index.day == 1 ] # Keep only first of the month first_year =", "op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar)", "hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1, )", "None: pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}%", "= groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ] if len(dates)", "verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was, dan is de", "(🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte", "\"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is huis", "= dates.max() - dates.min() return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float,", "True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else", "en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met", "cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw", "🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) -", ") df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df =", "groei[which][groei.index == date].iloc[0] / 100 return (1 + pct) ** (1 / 12)", "vermogen -= heffingvrij vermogen -= schulden if vermogen < 0: return 0 #", "de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was, dan is", "\"\").astype(float) # Create data points for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price", "een jaar eerder\" # met alle kwartaal data sinds 1996. 
df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\")", ") return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100))", "afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald", "# **Adjusted close price adjusted for both dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\",", "\"notities\", ] ] return table def fill_area(x: pd.Series, ax, alpha: float = 1.0)", "0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"},", "Number = 2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number = 90_000,", "tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis", "same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei =", "bool = True, ): dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12)", "belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min()", "onderhoud = onderhoud_pct / 100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks) +", "met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000", "0 # De rest is in box 3 schijf_1 = 100_000 - 50_000", "tijd, bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price", "f\"we starten op {aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon", "= df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots()", "van {jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we starten op {aankoop_datum}. \"", "plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500 index", "/ 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde", "return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes", ") (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels,", "belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar! woz_waarde", "0 belegging = 0 huis_waarde = geleend for date in dates: huis_waarde *=", "df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day == 1]", "matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily data to need to resample", "import defaultdict from functools import partial from itertools import product from numbers import", "met alle kwartaal data sinds 1996. df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e", "float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud", "vanaf 2021. 
https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen -=", "toekomst kijken return with get_reusable_executor() as executor: results = list( tqdm( executor.map( partial(try_run_simulation,", "dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt", "3 schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1", "# We rekenen terug naar de prijs van vorig jaar prijs /= maandelijke_groei(_date,", "(ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max()", "Yahoo! Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965", "Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1, ) ->", "dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de", "def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil < 0).sum() /", "< jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet", "(jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95,", "\" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k", "= stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for date, value in stock_price[stock_price.index", "f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we starten op", "+ onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs: 
abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000,", "in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day ==", "pd.Series(stock_relative) # Select at same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative", "dates[::-1]: # We rekenen terug naar de prijs van vorig jaar prijs /=", "een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis", "beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop", "we een huis gehuurd voor €{huur} per maand en belegd, dan hadden we", "ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren)))", "belegging += hypotheek_kosten - huur afgelost += float(afbetaling) if date.month == 1 and", "cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\")", "zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame,", "kopen huis t.o.v. 
beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number", ") mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil > 0].mean()", "= 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen:", "total=len(iterator), ) ) df = pd.DataFrame([r for r in results if r is", "for r in results if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"]", "stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for date, value in stock_price[stock_price.index >=", "partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald:", "Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True, verbose: bool", "\"huis\" ) -> float: pct = groei[which][groei.index == date].iloc[0] / 100 return (1", "df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def", "matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na", "van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. 
een jaar eerder\" #", "x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\")", "af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst >", "5.69 / 100 inkomen = inkomen_1 + inkomen_2 + inkomen_3 return inkomen *", "jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar(", "in example_periods: mean = lambda x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean()", "hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1)", "adjusted for both dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data", "df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each day stock_price = stock_price.resample(\"D\").interpolate() return", "figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price =", "from loky import get_reusable_executor from tqdm.notebook import tqdm from maandlasten import maandlasten from", "huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar =", "x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen =", "float(afbetaling) if date.month == 1 and date.year > start_year: # Betaal vermogensbelasting over", "of the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for", "= stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\",", "None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, 
xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per", "table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 -", "in results if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst", "hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360,", "tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008", "\"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes,", "/ 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() /", "onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True, verbose: bool = True, ):", "return (1 + pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp,", "def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int =", "jaarinkomen: Number = 90_000, schulden: Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner:", "pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame)", "matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy.optimize", "vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1],", "een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we", "-= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3", "return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def", "dct in example_periods: mean = lambda x: x[(x.index >= dct[\"van\"]) & (x.index <=", "] if len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop}", "alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\",", ") persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100,", "cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax,", "), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\",", "maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min()", "12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs:", "is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\")", "en belegd, dan hadden we €{belegging/1000:.0f}k. 
\" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k", "add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True,", "import get_reusable_executor from tqdm.notebook import tqdm from maandlasten import maandlasten from mortgage import", ") ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis", "cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is", "lambda x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"]", "legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\")", ") fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8,", "/ 100 inkomen = inkomen_1 + inkomen_2 + inkomen_3 return inkomen * 31", "crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot", "persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd,", "100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year =", "van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008", "recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ),", "betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year]", "= f\"{round(dt / 86400 / 365)} jaar\" 
table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\",", "= date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] = (value - prev) /", "\" f\"te kopen en in {100-pct_rood:.1f}% is het beter om een huis te", "koopwoningen Ontwikkeling t.o.v. een jaar eerder\" # met alle kwartaal data sinds 1996.", "bron: Yahoo! Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van", "datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError: #", "notities=\"van 2008 crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds", "huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies =", "1 results = {} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) )", "abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() ->", "groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand van de prijs van", ") plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs", "Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald = 0 afgelost = 0", "hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald = 0 afgelost", "periode\"] = f\"{round(dt / 86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\",", "- overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst > 0 else \"verlies\"", "date.year > start_year: # Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden,", "notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 
crisis tot", "(jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\")", "pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil) print(", "teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen = geleend - afgelost", "ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is", "= True, verbose: bool = True, ): dates = groei.index[groei.index >= aankoop_datum][ :", "df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col", "norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), )", "\" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud =", "afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. 
\"", "import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily data to", "huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, )", ") (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000,", "is het beter om een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil <", "het beter om een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean()", "ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis", "n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum,", "in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar =", "add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst", "Ontwikkeling t.o.v. een jaar eerder\" # met alle kwartaal data sinds 1996. 
df", "ax = plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\",", "beter om aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is het beter om", "hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct /", "inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2", "groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente =", "fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8))", "Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30 * 12,", "= 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True, verbose: bool =", "verbose: bool = True, ): dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop *", "): dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ]", "= pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits", "= 100 * (df.verschil < 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle", "ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text( 0.95,", "7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs.", "stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] =", "sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ),", "* (df.verschil < 0).sum() / len(df.verschil) 
print( f\"In {pct_blauw:.1f}% van alle gevallen is", "title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\")", "for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen,", "return huis_waarde * onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen: float, schulden:", "stock_price[date_prev] stock_relative[date] = (value - prev) / prev * 100 stock_relative = pd.Series(stock_relative)", "Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float)", "None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index,", ">= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner", "vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\")", "= dates[0].year betaald = 0 afgelost = 0 belegging = 0 huis_waarde =", "31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei:", "tqdm from maandlasten import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def", "schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3", "alpha: float = 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\",", "method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400,", "is het beter om aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is het", "# *Close price adjusted for splits # **Adjusted close price adjusted for both", "* 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) 
vermogen -=", "- df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) ->", "inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df", "8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar", "= groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar +", "executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) ) df =", "y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na", "stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ] # Keep only", "duur van {jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we starten op {aankoop_datum}.", "= fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()])", "jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en", "# Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg", "first of the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {}", "= \"winst\" if huis_winst > 0 else \"verlies\" print( f\"We hebben op {aankoop_datum}", "belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min()", ") ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(),", 
"plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren))", "(%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show()", "beleggen\") ax.set_title(\"Winst kopen huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000,", "0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None:", "pd.Series, ax, alpha: float = 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values >", "pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds() / 86400 / 365.25 def", "\"\"\"WOZ waarde is bepaald aan de hand van de prijs van vorig jaar.\"\"\"", "prijs van vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates:", "f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\"", "kijken return with get_reusable_executor() as executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters),", "schijf_2 = 1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2", "return inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number,", "hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen = geleend -", "-> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs.", "notities=\"alle data sinds 1996 tot en met nu\" ), ] for dct in", "dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds()", "jaar_tot_verkoop, 
groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar' is niet mogelijk want", "ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"},", "def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v.", "plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per", "we €{belegging/1000:.0f}k. \" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict(", "(np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"]", "*= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten =", "keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax =", "{date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde", "1, met_fiscaal_partner: bool = True, verbose: bool = True, ): dates = groei.index[groei.index", "van €{geleend/1000:.0f}k gekocht. \" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald,", "heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -= schulden", "alle kwartaal data sinds 1996. 
df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\",", "if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -= schulden if vermogen <", "0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging", "len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar in", "belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1,", "= df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het geval dat aandelen beter", "onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen: float, schulden: float = 0,", "maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling)", "df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df def", "def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde * onderhoud_pct / 100", "= geleend - afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde -", "= plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs", "8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax", "f\"{first_year+1}-02-01\" stock_relative = {} for date, value in stock_price[stock_price.index >= start].items(): date_prev =", "niet mogelijk als \" f\"we starten op {aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f}", "aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str,", "aan de hand van de prijs van vorig jaar.\"\"\" vorig_jaar = date.year -", "groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in", "axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na", "schulden: Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True, verbose:", "from maandlasten import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500()", "defaultdict(float) start_year = dates[0].year betaald = 0 afgelost = 0 belegging = 0", "\"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 *", "verbose: winst_of_verlies = \"winst\" if huis_winst > 0 else \"verlies\" print( f\"We hebben", "hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek =", "plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map =", "stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def", "= huis_waarde - af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\"", "in dates[::-1]: # We rekenen terug naar de prijs van vorig jaar prijs", "df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil < 
0).sum()", "import Any, Dict, Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot as plt", "0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\")", "sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True,", "Dict, Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot as plt import numpy", "= df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax =", "schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3 =", "waarde is bepaald aan de hand van de prijs van vorig jaar.\"\"\" vorig_jaar", "pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for", "pd import scipy.optimize from loky import get_reusable_executor from tqdm.notebook import tqdm from maandlasten", "import matplotlib import matplotlib.colors import matplotlib.pyplot as plt import numpy as np import", "0.95, \"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes,", "fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop", "= scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x),", "return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot(", "\"aankoop_datum\"]).to_xarray() fig, axs = 
plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15", "load_sp500() -> pd.Series: # Daily data to need to resample it to quarterly", "aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number = 1000,", "-= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000 -", "\"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df", "product from numbers import Number from typing import Any, Dict, Literal, Union import", "legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for", "hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs):", "vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug", "ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values,", "- 1 dates = groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for _date in", "ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text(", "pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float: pct =", "for _date in dates[::-1]: # We rekenen terug naar de prijs van vorig", "float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten - huur afgelost += float(afbetaling) if", "pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ] # Keep", "pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360,", "float, onderhoud_pct: float = 2): return huis_waarde 
* onderhoud_pct / 100 / 12", "prijs van vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates = groei.index[groei.index.year ==", "df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col]", "af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde", "splits # **Adjusted close price adjusted for both dividends and splits. stock_price =", "df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\",", "Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek", "50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90", "= f\"{first_year+1}-02-01\" stock_relative = {} for date, value in stock_price[stock_price.index >= start].items(): date_prev", "360, onderhoud_pct: Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage(", "notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\"", "= next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten", "# Daily data to need to resample it to quarterly like the huizenprijzen", "mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 /", "nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met nu\" ),", "{jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we starten op {aankoop_datum}. 
\" f\"Een", "koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number =", "huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting if", "df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None:", "*= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling,", "is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1,", "eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1 results = {}", "100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs / 12", "(ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, )", "= partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule()", "- 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 *", "def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) ->", "gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k", "= min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100", "/ 86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\",", "analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil)", "(1 + pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei:", "verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging,", "float: pct = groei[which][groei.index == date].iloc[0] / 100 return (1 + pct) **", "eind_jaar - start_jaar + 1 results = {} iterator = list( product(groei.index[groei.index.year >=", "= groei[which][groei.index == date].iloc[0] / 100 return (1 + pct) ** (1 /", "teruggave af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst =", "tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) ) df", "<gh_stars>1-10 from collections import defaultdict from functools import partial from itertools import product", "= list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop", "\"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\"", "geleend - afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen", "belegd, dan hadden we €{belegging/1000:.0f}k. 
\" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\"", ") fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex", "https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een jaar eerder\" # met", "resample it to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date)", "to resample it to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date =", "plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500", "jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \"", "df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter(", "the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price", "prev * 100 stock_relative = pd.Series(stock_relative) # Select at same dates as huis", "-> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors))", "/ 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -=", "df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8,", "hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs", "8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 
0.95, \"rood is", "beter om een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() /", "0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\",", "day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price = load_sp500()", "example_periods: mean = lambda x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"]", "12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan", "maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde * onderhoud_pct / 100 /", "# 'jaar' is niet mogelijk want we kunnen niet in de toekomst kijken", "= \"huis\" ) -> float: pct = groei[which][groei.index == date].iloc[0] / 100 return", "\"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een jaar eerder\" # met alle kwartaal data", ") ) df = pd.DataFrame([r for r in results if r is not", "kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] =", "sinds 1996 tot en met nu\" ), ] for dct in example_periods: mean", "huis gehuurd voor €{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k. \"", "datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. 
beleggen\") plt.show() def", "print( f\"In {pct_blauw:.1f}% van alle gevallen is het beter om aandelen \" f\"te", "betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any])", "ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def", "jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠", "color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar in jaren:", "/ 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year", "def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds() / 86400 /", "both dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for", "schulden: float = 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3", "dan hadden we €{belegging/1000:.0f}k. \" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" )", "ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. 
beleggen\") plt.show() def hyptotheek_van_huur(", "huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis =", "lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def", "(df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame)", ") for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df =", "belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 /", "< 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen is het beter", "groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax,", "def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map", "- dates.min() return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float", "scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2)", "hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) /", "executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\",", "= df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each day stock_price = stock_price.resample(\"D\").interpolate()", "verbose=False, **parameters, ) except ValueError: # 'jaar' is niet mogelijk want we kunnen", "pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ", "2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", 
notities=\"van 2008 crisis tot en met nu\"), dict(", "betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop", "hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number = 90_000, schulden: Number =", "groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]: # We rekenen", "= 1, met_fiscaal_partner: bool = True, verbose: bool = True, ): dates =", "/ 1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None: fig,", "huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar", "koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar' is niet", "van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek", "van de prijs van vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates =", "ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1,", "na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst", "pandas as pd import scipy.optimize from loky import get_reusable_executor from tqdm.notebook import tqdm", "list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop =", "= 90_000, schulden: Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool =", "verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop", 
"import pandas as pd import scipy.optimize from loky import get_reusable_executor from tqdm.notebook import", "maand en belegd, dan hadden we €{belegging/1000:.0f}k. \" f\"Dat is dus €{(belegging -", "onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema =", "# Krijg hypotheekrenteaftrek terug van vorig jaar! woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek", "scipy.optimize from loky import get_reusable_executor from tqdm.notebook import tqdm from maandlasten import maandlasten", "\" f\"we starten op {aankoop_datum}. \" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" )", "alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei:", "Yahoo! Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500()", "print( f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()}", "ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\",", "we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na", ") return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7))", "= min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100", "c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\",", "= 1.0) -> None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1,", "20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True, verbose: bool = True,", "list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for", "norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging /", "= Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100", "dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner =", "= hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen = geleend", "hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\",", "plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], 
norm=matplotlib.colors.TwoSlopeNorm(", "prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds() / 86400", "/ 12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize( lambda", "= 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\":", "/ 1000 print( f\"In het geval dat aandelen beter waren, dan is de", "float = 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\"", "[huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) -> None:", "bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns =", "schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2 =", "- persoon.netto_loon betaald -= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde", "simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for r in results if r", "= matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for", "inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend:", "cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is", "verschil vs. tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def", "{aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben", "df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax,", "plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil", "rest is in box 3 schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen,", "= pd.Series(stock_relative) # Select at same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio]", "vs. tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei:", "waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter", "ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500 index vs. tijd, bron:", "/ prev * 100 stock_relative = pd.Series(stock_relative) # Select at same dates as", "dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return", "partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r", "100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in", "onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente)", "start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar", "df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df def", "= stock_price[ stock_price.index.day == 1 ] # Keep only first of the month", ") except ValueError: # 'jaar' is niet mogelijk want we kunnen niet in", "which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> 
float: pct = groei[which][groei.index == date].iloc[0]", "+= \" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt =", "/ 1000 mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het geval", "vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt", "axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0,", "df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het geval dat aandelen beter waren,", "stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax,", "stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each day stock_price =", "results if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst -", "dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner]", "Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente /", "stock_relative = {} for date, value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year", "cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar == jaar].plot(", "aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds() / 86400 / 365.25", "0.95, \"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0,", "f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. 
\" f\"Hadden we een huis gehuurd voor", "pct = groei[which][groei.index == date].iloc[0] / 100 return (1 + pct) ** (1", "+ 1 ] if len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een duur", "- huur afgelost += float(afbetaling) if date.month == 1 and date.year > start_year:", "inplace=True) # *Close price adjusted for splits # **Adjusted close price adjusted for", "-= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst", "jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except", "not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar", "value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1) prev = stock_price[date_prev]", "pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[", "huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. 
\" f\"Hadden we", "axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap =", "fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\",", "product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar", "hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en", "r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) /", "first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for date, value in", "< 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei(", "= belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2 =", "from tqdm.notebook import tqdm from maandlasten import maandlasten from mortgage import Mortgage, dollar", "%\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3)", "plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day ==", "1 and date.year > start_year: # Betaal vermogensbelasting over vorig jaar belegging -=", "vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, )", "load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] )", "rekenen terug naar de prijs van vorig jaar prijs /= 
maandelijke_groei(_date, groei, \"huis\")", "= True ): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner", "prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show()", "groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float: pct = groei[which][groei.index", "> 0 else \"verlies\" print( f\"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k", "schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen =", "100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res", "'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\",", "> 0].mean() / 1000 print( f\"In het geval dat aandelen beter waren, dan", "dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar ==", "vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen = inkomen_1", "belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 /", "ValueError: # 'jaar' is niet mogelijk want we kunnen niet in de toekomst", "na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1,", ") def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop,", "= df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax", "x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values", "parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for", "- 
📈)\", \"notities\", ] ] return table def fill_area(x: pd.Series, ax, alpha: float", "pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs", "= eind_jaar - start_jaar + 1 results = {} iterator = list( product(groei.index[groei.index.year", "start_year = dates[0].year betaald = 0 afgelost = 0 belegging = 0 huis_waarde", "persoon.netto_loon betaald -= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde *", "f\"Hadden we een huis gehuurd voor €{huur} per maand en belegd, dan hadden", "box 3 schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -=", "8)) for jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False", "stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig,", "start].items(): date_prev = date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] = (value -", "), \"Monte Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for r in", "prev) / prev * 100 stock_relative = pd.Series(stock_relative) # Select at same dates", "norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis", "> 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\",", "), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True,", "jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number =", "for dct in example_periods: mean = lambda x: x[(x.index >= dct[\"van\"]) & (x.index", "cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = 
plt.subplots(figsize=(8, 8))", "kopen.\" ) mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil >", "float, schulden: float = 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021.", "prijs = huidige_prijs for _date in dates[::-1]: # We rekenen terug naar de", "\"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ] return table def fill_area(x:", "is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000", "def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False,", "= pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True)", "vorig jaar! woz_waarde = bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1],", "table def fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None: ax.fill_between( x.index,", "ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\", figsize=(7, 7), )", "De rest is in box 3 schijf_1 = 100_000 - 50_000 belastbaar_1 =", "jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk", "matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar", "1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df", "typing import Any, Dict, Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot as", "tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000,", "onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df", "example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste", "pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits # **Adjusted close price", "float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69", "= belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3)", "run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar", "(x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\",", "Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente:", "dates[0].year betaald = 0 afgelost = 0 belegging = 0 huis_waarde = geleend", "from numbers import Number from typing import Any, Dict, Literal, Union import matplotlib", "starten op {aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon =", "groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return", "from typing import Any, Dict, Literal, Union import matplotlib import matplotlib.colors import matplotlib.pyplot", "jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods", "range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg(", "kwartaal data sinds 1996. df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\"", "maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen", "(x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100 * (df.verschil", "levels=levels, ) axs[0, 0].text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging", "# Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een jaar", "belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten", "belastbaar_3 * 5.69 / 100 inkomen = inkomen_1 + inkomen_2 + inkomen_3 return", "jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar", "dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[ [", "zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. 
beleggen\") ax.set_title(\"Winst kopen huis t.o.v.", "Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int", "huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\",", "import numpy as np import pandas as pd import scipy.optimize from loky import", "+ 1 results = {} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar))", "0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\":", "50_000 vermogen -= heffingvrij vermogen -= schulden if vermogen < 0: return 0", "as plt import numpy as np import pandas as pd import scipy.optimize from", "onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur), x0=100_000, method=\"Nelder-Mead\",", "\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost,", "= pd.DataFrame([r for r in results if r is not None]) df.aankoop_datum =", "numpy as np import pandas as pd import scipy.optimize from loky import get_reusable_executor", "/ 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde * onderhoud_pct", "1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2", "n_jaar = eind_jaar - start_jaar + 1 results = {} iterator = list(", "for date, value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1) prev", "onderhoud_pct: Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente", "1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1 results =", "bron: CBS\", figsize=(8, 
8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) ->", "jaar is niet mogelijk als \" f\"we starten op {aankoop_datum}. \" f\"Een duur", "/ 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0,", "cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde", "hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur,", "list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator), ) )", "1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7))", "# Select at same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative =", "# Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. 
een jaar eerder\" # met alle", "1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\",", "True, verbose: bool = True, ): dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop", "is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame)", "hypotheek_kosten belegging += hypotheek_kosten - huur afgelost += float(afbetaling) if date.month == 1", "jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\",", "vermogen: float, schulden: float = 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf", "= 100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -= schulden if", "https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen", "def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\",", "(x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is beter\",", "> start_year: # Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner)", "stock_price def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\",", "pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand van de prijs", "r in results if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] =", "x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values <", "-> pd.DataFrame: stock_price = 
load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ] #", "huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number", "index vs. tijd, bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") ->", "cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\",", "x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] =", "for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df =", "== 1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7,", "ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar", "kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs)", "None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index vs. 
tijd,", "for jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False )", "1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number = 1,", "met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en met nu\"", "tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data sinds 1996 tot en", "= 0 afgelost = 0 belegging = 0 huis_waarde = geleend for date", "cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm(", "aandelen beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis", "huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted", "itertools import product from numbers import Number from typing import Any, Dict, Literal,", "betaald -= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting = huis_waarde * 0.02", "-> pd.Series: # Daily data to need to resample it to quarterly like", "plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\",", "100 stock_relative = pd.Series(stock_relative) # Select at same dates as huis prijzen huis_prijsindex", "{aankoop_datum}. 
\" f\"Een duur van {aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud", "if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 -", "van vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex):", "= plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0],", "af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame:", "afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) ->", "pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil) print( f\"In {pct_blauw:.1f}% van", "-= heffingvrij vermogen -= schulden if vermogen < 0: return 0 # De", "color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop", "None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0),", "where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between( x.index, x.values, where=x.values < 0,", "if huis_winst > 0 else \"verlies\" print( f\"We hebben op {aankoop_datum} een huis", "tot en met nu\" ), ] for dct in example_periods: mean = lambda", "belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 =", "= f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt", "= True, ): dates = groei.index[groei.index >= aankoop_datum][ : 
round(jaar_tot_verkoop * 12) +", "return table def fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None: ax.fill_between(", "de prijs van vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs def", "[ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes", "= float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) -", ") cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines(", "-= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen = inkomen_1 +", "8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max()", "0.95, 0.95, \"rood is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\",", "aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is het beter om een huis", "dates = groei.index[groei.index >= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ] if", "persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting =", "Number from typing import Any, Dict, Literal, Union import matplotlib import matplotlib.colors import", "{} for date, value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1)", "na (jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95,", "mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily data", "is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def", "0].mean() / 1000 mean_huis = 
df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het", "plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar],", "afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen - betaald", "None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True,", "matplotlib import matplotlib.colors import matplotlib.pyplot as plt import numpy as np import pandas", "ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. tijd, bron:", "+= float(afbetaling) if date.month == 1 and date.year > start_year: # Betaal vermogensbelasting", "# met alle kwartaal data sinds 1996. df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime(", "xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. tijd, bron: CBS\", figsize=(8,", "tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame)", "xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500 index vs. 
tijd, bron: Yahoo!", "\"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠", "belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 * 4.50 /", "en in {100-pct_rood:.1f}% is het beter om een huis te kopen.\" ) mean_beleggen", "f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt /", "* 1.90 / 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2 = min(vermogen, schijf_2)", "fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig,", "label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\")", "= persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen = geleend - afgelost overdrachts_belasting", "huis van €{geleend/1000:.0f}k gekocht. \" f\"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k", "tqdm.notebook import tqdm from maandlasten import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\",", "load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een", "groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar' is niet mogelijk want we", "else 50_000 vermogen -= heffingvrij vermogen -= schulden if vermogen < 0: return", "vorig_jaar = date.year - 1 dates = groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs", "om aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is het beter om een", "na de 2008 crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met", "vs. tijd, bron: Yahoo! 
Finance\", color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): #", "it to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\",", "{winst_of_verlies}. \" f\"Hadden we een huis gehuurd voor €{huur} per maand en belegd,", "def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud =", "1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap", "- 100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2 inkomen_2 = belastbaar_2 *", "betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst > 0 else", "= \"huis\" if mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil", "> mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"] =", "int = 360, onderhoud_pct: Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks", "huis_waarde * onderhoud_pct / 100 / 12 def vermogensbelasting( vermogen: float, schulden: float", "1000 mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het geval dat", "Select at same dates as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index]", "figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(),", "100 return (1 + pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float, date:", "bool = True, verbose: bool = True, ): dates = groei.index[groei.index >= aankoop_datum][", "= 0 belegging = 0 huis_waarde = geleend for date in dates: huis_waarde", "title=\"Huizenprijs verschil vs. 
tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show()", "dt = dates.max() - dates.min() return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde:", "return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot(", "crisis\" ), dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met nu\"), dict( van=\"1996-Q1\",", "(x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if", "winst_of_verlies = \"winst\" if huis_winst > 0 else \"verlies\" print( f\"We hebben op", "aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei: pd.DataFrame,", "vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]: # We rekenen terug naar", "fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande", "groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema) hypotheek_kosten = float(afbetaling) +", "dict(van=\"2009-Q2\", tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle", "CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None:", "df = df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig,", "def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1", "met_fiscaal_partner: bool = True, verbose: bool = True, ): dates = groei.index[groei.index >=", 
"\"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\",", "bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand", "persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -= teruggave af_te_lossen =", "splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) # Create data points for each day stock_price", "huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen", "def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day == 1", "df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\",", "We rekenen terug naar de prijs van vorig jaar prijs /= maandelijke_groei(_date, groei,", "100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 =", "pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame, huur: Number = 1000, hypotheekrente: Number", "jaar.\"\"\" vorig_jaar = date.year - 1 dates = groei.index[groei.index.year == vorig_jaar] prijs =", "per jaar (%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\", color=\"k\", ) fill_area(groei.aandelen,", "= 360, onderhoud_pct: Number = 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks =", "plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500 prijs ($)\", title=\"S&P500 index", "huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst,", "f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates),", "maandlasten import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() ->", "huis t.o.v. beleggen\") plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number =", "date.month == 1 and date.year > start_year: # Betaal vermogensbelasting over vorig jaar", "fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1,", "is bepaald aan de hand van de prijs van vorig jaar.\"\"\" vorig_jaar =", "90_000, schulden: Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool = True,", "- afgelost overdrachts_belasting = huis_waarde * 0.02 huis_winst = huis_waarde - af_te_lossen -", "hadden we €{belegging/1000:.0f}k. 
\" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return", "7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[", "= dict(zip(sorted(jaren), cmap.colors)) fig, ax = plt.subplots(figsize=(8, 8)) for jaar in jaren: df[df.aantal_jaar", "if len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar", "ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\",", "mogelijk want we kunnen niet in de toekomst kijken return with get_reusable_executor() as", "stock_price[ stock_price.index.day == 1 ] # Keep only first of the month first_year", "€{belegging/1000:.0f}k. \" f\"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum,", "* 0.02 huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting if verbose:", "ax.fill_between( x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(),", "loky import get_reusable_executor from tqdm.notebook import tqdm from maandlasten import maandlasten from mortgage", "belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000 - 100_000 belastbaar_2 = min(vermogen,", "- af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst", "= (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)} jaar\"", "pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand van de prijs van vorig", "= pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in", "86400 / 365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\",", "* 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, 
pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number,", "belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) ->", "col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day", "= pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\",", "vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100 schijf_2 = 1_000_000", "ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we starten", "(%)\", title=\"Huizenprijs verschil vs. tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax)", "as huis prijzen huis_prijsindex = load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex,", "raise ValueError( f\"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als \" f\"we", "want we kunnen niet in de toekomst kijken return with get_reusable_executor() as executor:", "hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct", "ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax,", "1996. 
df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden = pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") )", "Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 360, onderhoud_pct: Number", "float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de hand van", "- 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"]", "jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren), cmap.colors)) fig, ax", "*Close price adjusted for splits # **Adjusted close price adjusted for both dividends", "dates.min() return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float =", "in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns", "we kunnen niet in de toekomst kijken return with get_reusable_executor() as executor: results", "ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show()", "df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\")", "= 1, ) -> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100,", "float] = defaultdict(float) start_year = dates[0].year betaald = 0 afgelost = 0 belegging", "axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2]", "ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. 
beleggen\") ax.set_title(\"Winst kopen", "min(vermogen, schijf_3) vermogen -= belastbaar_3 inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen", "= pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df:", "hyptoheek_hoogstes]).T df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) ->", "- huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde,", "import tqdm from maandlasten import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15)", "1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"}, cmap=\"magma\", levels=levels, ) axs[0, 0].text(", "start_year: # Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) #", "= plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500", "ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ),", "van vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates = groei.index[groei.index.year == vorig_jaar]", "belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen, schijf_3) vermogen", "legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. 
tijd, bron: CBS\",", "maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema", "color=\"k\", ) fill_area(groei.aandelen, ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col:", "vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde", "betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald =", "and date.year > start_year: # Betaal vermogensbelasting over vorig jaar belegging -= vermogensbelasting(belegging,", "is niet mogelijk want we kunnen niet in de toekomst kijken return with", "verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\",", "inkomen = inkomen_1 + inkomen_2 + inkomen_3 return inkomen * 31 / 100", "/ 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment() onderhoud = onderhoud_pct / 100 * huis_prijs /", ") ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which:", "+ float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten - huur", "huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde)", "rente = next(betaalschema) hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald +=", "ax) plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande 
koopwoningen", "return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2):", "price adjusted for splits # **Adjusted close price adjusted for both dividends and", "+ inkomen_3 return inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp],", ") (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, )", "load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ] # Keep only first of", "date, value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1) prev =", "groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. tijd,", "-> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04,", "levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp", "df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax, color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap),", "Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price", "[\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw =", "eerder\" # met alle kwartaal data sinds 1996. 
df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden =", "date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date] = (value - prev) / prev", "dates = groei.index[groei.index.year == vorig_jaar] prijs = huidige_prijs for _date in dates[::-1]: #", "dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost,", "persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald -=", "start_jaar + 1 results = {} iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1,", "import Number from typing import Any, Dict, Literal, Union import matplotlib import matplotlib.colors", "/ 100 / 12 def vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner:", "get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day == 1 ]", "(x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"},", "hand van de prijs van vorig jaar.\"\"\" vorig_jaar = date.year - 1 dates", "in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax,", "import maandlasten from mortgage import Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series:", "hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. 
\" f\"Op {date.date()} (na {aantal_jaar(dates):.1f}", "niet in de toekomst kijken return with get_reusable_executor() as executor: results = list(", "2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur", "schulden if vermogen < 0: return 0 # De rest is in box", "+= hypotheek_kosten belegging += hypotheek_kosten - huur afgelost += float(afbetaling) if date.month ==", "(€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw = 100", "is in box 3 schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1)", "geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald", "{aantal_jaar(dates):.2f} is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek =", "€{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een", "# Create data points for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def", "plt.show() def load_huizen_prijsindex_per_regio(): # Gedownload van https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965 # Col: \"Prijsindex bestaande koopwoningen Ontwikkeling", "pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\",", "aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, )", "df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels =", "axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 
1].set_ylabel(\"\") plt.show() def plot_result_lines(df: pd.DataFrame) ->", "fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [", "/ len(df.verschil) print( f\"In {pct_blauw:.1f}% van alle gevallen is het beter om aandelen", "Mortgage, dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily data to need", "alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede'", "as pd import scipy.optimize from loky import get_reusable_executor from tqdm.notebook import tqdm from", "kunnen niet in de toekomst kijken return with get_reusable_executor() as executor: results =", "as executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo", "0 afgelost = 0 belegging = 0 huis_waarde = geleend for date in", "Any]) -> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar =", "< 0].mean() / 1000 mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In", "stock_price = stock_price[ stock_price.index.day == 1 ] # Keep only first of the", "{pct_blauw:.1f}% van alle gevallen is het beter om aandelen \" f\"te kopen en", "-> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.aandelen.plot( ax=ax, xlabel=\"Datum\", ylabel=\"S&P500 prijs stijging/daling", "\", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns: df[col] = df[col].str.replace(\",\", \".\").astype(float)", "round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes =", "hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year", "= plt.subplots(figsize=(8, 8)) for jaar in jaren: 
df[df.aantal_jaar == jaar].plot( x=\"aankoop_datum\", y=\"verschil\", ax=ax,", "/ 365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\",", "else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) -", "dates.max() - dates.min() return dt.total_seconds() / 86400 / 365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct:", "€{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis", "\"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente = next(betaalschema)", "dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400 / 365)}", "vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf(", "\".\").astype(float) df = df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df def plot_huizenprijzen(groei:", "j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1", "alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8),", "df = df.resample(\"D\").interpolate() df = df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame)", "bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur( huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1,", "def fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None: ax.fill_between( x.index, x.values,", "jaar (%)\", title=\"Huizenprijs verschil vs. 
tijd, bron: CBS\", figsize=(8, 8), color=\"k\", ) fill_area(groei.huis,", "numbers import Number from typing import Any, Dict, Literal, Union import matplotlib import", "= 30 * 12, jaarinkomen: Number = 90_000, schulden: Number = 20_000, onderhoud_pct:", "if r is not None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging)", "huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show()", "0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def", "= min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1 * 1.90 / 100", "bestaande koopwoningen Ontwikkeling t.o.v. een jaar eerder\" # met alle kwartaal data sinds", "**parameters, ) except ValueError: # 'jaar' is niet mogelijk want we kunnen niet", "== 1 ] # Keep only first of the month first_year = stock_price.index.min().year", "= defaultdict(float) start_year = dates[0].year betaald = 0 afgelost = 0 belegging =", "# Keep only first of the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\"", "* 100 stock_relative = pd.Series(stock_relative) # Select at same dates as huis prijzen", "Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float: pct = groei[which][groei.index == date].iloc[0] /", "schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar! woz_waarde = bepaal_woz(huis_waarde, date,", "= onderhoud_pct / 100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud", "start = f\"{first_year+1}-02-01\" stock_relative = {} for date, value in stock_price[stock_price.index >= start].items():", "per jaar (%)\", title=\"Huizenprijs verschil vs. 
tijd, bron: CBS\", figsize=(8, 8), color=\"k\", )", "= plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw is", "-> pd.DataFrame: start_jaar = groei.index.year.min() + 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar", "\"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() - dates.min() return dt.total_seconds()", "f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}.", "\"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ]", "/ 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop: Number, geleend: Number, groei: pd.DataFrame,", "for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price", "\" 🏆\" dct[\"verschil (🏠 - 📈)\"] = f\"{mean(groei.huis) - mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"])", "- betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if huis_winst > 0", "is mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente", "float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging += hypotheek_kosten -", "Carlo simulatie\", total=len(iterator), ) ) df = pd.DataFrame([r for r in results if", "x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen", "# De rest is in box 3 schijf_1 = 100_000 - 50_000 belastbaar_1", "size=15) def load_sp500() -> pd.Series: # Daily data to need to resample it", "+ 1 eind_jaar = groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1 results", "= 
pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame)", "€{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k. \" f\"Dat is dus", "onderhoud_pct / 100 * huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud return", "None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs", "] return table def fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None:", "the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for date,", "= [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None: pct_blauw", "inkomen_3 return inkomen * 31 / 100 def koop_huis_of_beleg( aankoop_datum: Union[str, pd.Timestamp], jaar_tot_verkoop:", "beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df:", "defaultdict from functools import partial from itertools import product from numbers import Number", "iterator = list( product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum,", "for j in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\",", "per maand en belegd, dan hadden we €{belegging/1000:.0f}k. 
\" f\"Dat is dus €{(belegging", ") -> float: pct = groei[which][groei.index == date].iloc[0] / 100 return (1 +", "] # Keep only first of the month first_year = stock_price.index.min().year start =", "365.25 def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2): return huis_waarde * onderhoud_pct /", "\"winst\" if huis_winst > 0 else \"verlies\" print( f\"We hebben op {aankoop_datum} een", "($)\", title=\"S&P500 index vs. tijd, bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show() def", "): \"\"\"Vermogensbelasting vanaf 2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000", "= load_huizen_prijsindex_per_regio()[regio] stock_relative = stock_relative[huis_prijsindex.index] groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"]", "axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\")", "import matplotlib.colors import matplotlib.pyplot as plt import numpy as np import pandas as", "results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator ), \"Monte Carlo simulatie\", total=len(iterator),", "100 / 12 def vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner: bool", "dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) >", "df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax =", "levels=levels, cbar_kwargs={\"label\": \"Verschil (x€1000)\"}, ) (ds.belegging / 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\":", "ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) 
groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs", "color=color_map[jaar], legend=False ) cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j)", "= datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError:", "pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits #", "/ 100 return (1 + pct) ** (1 / 12) def bepaal_woz(huidige_prijs: float,", "mogelijk.\" ) persoon = maandlasten.Persoon(jaarinkomen) onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente /", ">= start_jaar], range(1, n_jaar)) ) def try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try:", "1996 tot en met nu\" ), ] for dct in example_periods: mean =", "None]) df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar =", "heffingvrij vermogen -= schulden if vermogen < 0: return 0 # De rest", "- prev) / prev * 100 stock_relative = pd.Series(stock_relative) # Select at same", "Number = 90_000, schulden: Number = 20_000, onderhoud_pct: Number = 1, met_fiscaal_partner: bool", "float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize( lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur),", "= hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] = defaultdict(float) start_year = dates[0].year betaald = 0", "pd.to_datetime( df.Perioden.str.replace(\"e kwartaal\", \"\").str.replace(\" \", \"-Q\") ) df.set_index(\"Perioden\", inplace=True) for col in df.columns:", "* huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res =", "rente_betaald[date.year - 1], 
woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave =", "inkomen_3 = belastbaar_3 * 5.69 / 100 inkomen = inkomen_1 + inkomen_2 +", "color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst kopen huis", ") persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon betaald", "groei = pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei:", "met nu\" ), ] for dct in example_periods: mean = lambda x: x[(x.index", "naar de prijs van vorig jaar prijs /= maandelijke_groei(_date, groei, \"huis\") return prijs", "color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax =", "date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\")", "dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen, huis_waarde=huis_waarde, huis_winst=huis_winst, belegging=belegging, ) def run_monte_carlo(groei:", "def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True)", "1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave = persoon_met_aftrek.netto_loon -", "Create data points for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500()", "0].mean() / 1000 print( f\"In het geval dat aandelen beter waren, dan is", "df[col] = df[col].str.replace(\",\", \".\").astype(float) df = 
df.resample(\"D\").interpolate() df = df[df.index.day == 1] return", "huur=huur, hypotheekrente=2.04, hyptotheek_looptijd=360, onderhoud_pct=1, ) for huur in bedragen ] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes)", "plt.subplots() df.plot.scatter( ax=ax, x=\"aankoop_datum\", y=\"aantal_jaar\", c=\"verschil\", s=100, alpha=1, norm=matplotlib.colors.TwoSlopeNorm(0), cmap=\"seismic\", title=\"Kopen of huren?\",", "huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int = 30 *", ") plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price = load_sp500() stock_price = stock_price[ stock_price.index.day", "= f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis) > mean(groei.aandelen) else", "print( f\"In het geval dat aandelen beter waren, dan is de verwachte winst", "where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\", color=\"k\") def", "bepaal_woz(huis_waarde, date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek =", "def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [ hyptotheek_van_huur(", "cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\":", "winst €{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was, dan is de verwachte", "month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative = {} for date, value", "fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling per jaar", "plt.gcf().get_axes() cax.set_ylabel(\"verschil (x€1000)\") ax.text( 0.95, 0.95, \"rood is huis is beter\\nblauw is belegging", "\"Verschil (x€1000)\"}, ) (ds.belegging / 
1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging", "& (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\"", "365)} jaar\" table = pd.DataFrame(example_periods)[ [ \"van\", \"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil", "\"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ] return table", "€{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden we een huis gehuurd voor €{huur} per maand en", "1000 df.aantal_jaar = df.aantal_jaar.round() return df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax", "1 ] # Keep only first of the month first_year = stock_price.index.min().year start", "= inkomen_1 + inkomen_2 + inkomen_3 return inkomen * 31 / 100 def", "huren?\", xlabel=\"Aankoop datum\", ylabel=\"verkopen na (jaar)\", figsize=(8, 8), ) ax, cax = plt.gcf().get_axes()", "legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per", "30 * 12, jaarinkomen: Number = 90_000, schulden: Number = 20_000, onderhoud_pct: Number", "axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax", "axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show()", "plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax,", "= 0, met_fiscaal_partner: bool = True ): \"\"\"Vermogensbelasting vanaf 2021. 
https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij", "df.columns = [\"maandlasten (€)\", \"hypotheek (x€1000)\"] return df def analyseer_data(df: pd.DataFrame) -> None:", "jaar eerder\" # met alle kwartaal data sinds 1996. df = pd.read_csv(\"huizen_prijsindex_per_regio.csv\") df.Perioden", "aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters, ) except ValueError: # 'jaar' is niet mogelijk", "from functools import partial from itertools import product from numbers import Number from", "\"aandelen\"] = \"huis\" ) -> float: pct = groei[which][groei.index == date].iloc[0] / 100", "huis_waarde - af_te_lossen - betaald - overdrachts_belasting if verbose: winst_of_verlies = \"winst\" if", "[ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict( van=\"2009-Q2\", tot=\"2014-Q1\", notities=\"slechtste jaren na", "plt.show() def plot_result_contour(df: pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs =", "kopen en in {100-pct_rood:.1f}% is het beter om een huis te kopen.\" )", "(%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei: pd.DataFrame): example_periods =", ">= aankoop_datum][ : round(jaar_tot_verkoop * 12) + 1 ] if len(dates) < jaar_tot_verkoop", "figsize=(8, 8), color=\"k\", ) fill_area(groei.huis, ax) plt.show() def plot_aandelen_en_huis(groei: pd.DataFrame) -> None: fig,", "x.index, x.values, where=x.values < 0, color=\"red\", alpha=alpha, zorder=-1, ) ax.hlines(0, x.index.min(), x.index.max(), ls=\"--\",", "ylabel=\"S&P500 prijs stijging/daling per jaar (%)\", title=\"S&P500 index vs. tijd, bron: Yahoo! 
Finance\",", ") cbar = fig.colorbar( matplotlib.cm.ScalarMappable(cmap=cmap), ax=ax, ) cbar.set_ticks(np.linspace(0, 1, len(jaren))) cbar.set_ticklabels([int(j) for j", "= groei.index.year.max() n_jaar = eind_jaar - start_jaar + 1 results = {} iterator", "prev = stock_price[date_prev] stock_relative[date] = (value - prev) / prev * 100 stock_relative", ") axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop", "{100-pct_rood:.1f}% is het beter om een huis te kopen.\" ) mean_beleggen = df.belegging[df.verschil", "if vermogen < 0: return 0 # De rest is in box 3", "1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf(", "bron: Yahoo! Finance\", figsize=(7, 7), ) plt.show() def get_groei(regio=\"Nederland\") -> pd.DataFrame: stock_price =", "'jaar' is niet mogelijk want we kunnen niet in de toekomst kijken return", "100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1 = belastbaar_1", "= pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) # *Close price adjusted for splits # **Adjusted close", "return with get_reusable_executor() as executor: results = list( tqdm( executor.map( partial(try_run_simulation, parameters=parameters), iterator", "gevallen is het beter om aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is", "alle gevallen is het beter om aandelen \" f\"te kopen en in {100-pct_rood:.1f}%", "= Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float] =", "= plt.subplots(figsize=(8, 8)) groei.aandelen[groei.huis.index].plot(ax=ax, label=\"Aandelen\", legend=True) groei.huis.plot(ax=ax, label=\"Huizenprijs\", legend=True) ax.set_title(\"Huizenprijs en aandelenprijs stijging/daling", "100 schijf_2 = 1_000_000 - 
100_000 belastbaar_2 = min(vermogen, schijf_2) vermogen -= belastbaar_2", "- start_jaar + 1 results = {} iterator = list( product(groei.index[groei.index.year >= start_jaar],", "1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1],", "inkomen_2 = belastbaar_2 * 4.50 / 100 schijf_3 = float(\"inf\") belastbaar_3 = min(vermogen,", "import product from numbers import Number from typing import Any, Dict, Literal, Union", "€{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald, afgelost=afgelost, af_te_lossen=af_te_lossen,", "\"tot\", \"lengte periode\", \"huis\", \"aandelen\", \"verschil (🏠 - 📈)\", \"notities\", ] ] return", "data sinds 1996 tot en met nu\" ), ] for dct in example_periods:", "plt.show() def hyptotheek_van_huur( huur: Number = 1000, hypotheekrente: Number = 2.04, hyptotheek_looptijd: int", "pd.concat( [huis_prijsindex, stock_relative], axis=1, keys=[\"huis\", \"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) ->", "Col: \"Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. 
een jaar eerder\" # met alle kwartaal", "-> float: pct = groei[which][groei.index == date].iloc[0] / 100 return (1 + pct)", "2.04, hyptotheek_looptijd: int = 30 * 12, jaarinkomen: Number = 90_000, schulden: Number", "stijging/daling per jaar (%)\") fill_area(groei.aandelen, ax, alpha=0.3) fill_area(groei.huis, ax, alpha=0.3) plt.show() def vergelijkings_tabel(groei:", "/= maandelijke_groei(_date, groei, \"huis\") return prijs def aantal_jaar(dates: pd.DatetimeIndex): dt = dates.max() -", "stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None: stock_price = load_sp500() stock_price.plot( xlabel=\"Datum\", ylabel=\"S&P500", "0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\")", "pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\" ) -> float: pct = groei[which][groei.index ==", "pd.DataFrame) -> None: ds = df.set_index([\"aantal_jaar\", \"aankoop_datum\"]).to_xarray() fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12,", "mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000 print( f\"In het geval dat aandelen", "to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True)", "schijf_1 = 100_000 - 50_000 belastbaar_1 = min(vermogen, schijf_1) vermogen -= belastbaar_1 inkomen_1", "maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek teruggave", "pd.DataFrame) -> None: jaren = df.aantal_jaar.unique()[1::2] cmap = matplotlib.cm.get_cmap(\"tab20\", len(jaren)) color_map = dict(zip(sorted(jaren),", "= {} for date, value in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year -", "import scipy.optimize from loky import get_reusable_executor from tqdm.notebook 
import tqdm from maandlasten import", "15 ds.verschil.plot.contourf( ax=axs[0, 0], norm=matplotlib.colors.TwoSlopeNorm( 0, vmin=ds.verschil.min(), vmax=ds.verschil.max() ), add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Verschil", "* 12) + 1 ] if len(dates) < jaar_tot_verkoop * 12: raise ValueError(", "-> None: fig, ax = plt.subplots(figsize=(7, 7)) groei.huis.plot( ax=ax, legend=False, xlabel=\"Datum\", ylabel=\"Huizenprijs stijging/daling", "is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes, fontsize=14, ) plt.show() def plot_result_contour(df: pd.DataFrame) -> None:", "matplotlib.colors import matplotlib.pyplot as plt import numpy as np import pandas as pd", "jaar belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner) # Krijg hypotheekrenteaftrek terug van vorig jaar!", "df[df.index.day == 1] return df def plot_huizenprijzen(groei: pd.DataFrame) -> None: fig, ax =", "dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald +=", "(1 / 12) def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is", "in color_map.keys()]) cbar.set_label(\"Verkoop na (jaar)\") ax.hlines( 0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 )", "1.0) -> None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, )", "met_fiscaal_partner else 50_000 vermogen -= heffingvrij vermogen -= schulden if vermogen < 0:", "\"Waarde belegging (x€1000)\"}, ) (ds.huis_winst / 1000).plot.contourf( ax=axs[1, 0], add_colorbar=True, levels=levels, norm=matplotlib.colors.TwoSlopeNorm( 0,", "en aandelenprijs stijging/daling per jaar in %\") ax.set_xlabel(\"Datum\") ax.set_ylabel(\"Prijs stijging/daling per jaar (%)\")", "= maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek = 
maandlasten.Persoon(persoon.bruto_jaarloon) persoon_met_aftrek.aftrek = hypotheek_aftrek", "-> float: def hyptotheek_kosten(huis_prijs): hyptotheek_maandelijks = Mortgage( hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs)) ).monthly_payment()", "1 ] if len(dates) < jaar_tot_verkoop * 12: raise ValueError( f\"Een duur van", "(na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, \" f\"€{afgelost/1000:.0f}k afgelost, een huiswaarde van", "- huur), x0=100_000, method=\"Nelder-Mead\", tol=1e-2, ) return round(float(res.x), 2) def hyptotheek_maandlasten_df() -> pd.DataFrame:", "mean = lambda x: x[(x.index >= dct[\"van\"]) & (x.index <= dct[\"tot\"])].mean() dct[\"huis\"] =", "huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis waarde (x€1000)\"},", "Keep only first of the month first_year = stock_price.index.min().year start = f\"{first_year+1}-02-01\" stock_relative", "2021. https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen -= heffingvrij", "2) def hyptotheek_maandlasten_df() -> pd.DataFrame: bedragen = list(range(400, 2000, 100)) hyptoheek_hoogstes = [", "in stock_price[stock_price.index >= start].items(): date_prev = date.replace(date.year - 1) prev = stock_price[date_prev] stock_relative[date]", "maandelijke_groei(date, groei, \"huis\") belegging *= maandelijke_groei(date, groei, \"aandelen\") betaald += onderhoud(huis_waarde) afbetaling, rente", "€{mean_beleggen:.1f}k.\" ) print(f\"Als een huis kopen beter was, dan is de verwachte winst", "return df def plot_result_scatter(df: pd.DataFrame) -> None: fig, ax = plt.subplots() df.plot.scatter( ax=ax,", "dollar matplotlib.rc(\"font\", size=15) def load_sp500() -> pd.Series: # Daily data to need to", "de hand van de prijs van vorig jaar.\"\"\" vorig_jaar = date.year - 1", "- 
mean(groei.aandelen):.2f}%\" dt = (pd.to_datetime(dct[\"tot\"]) - pd.to_datetime(dct[\"van\"])).total_seconds() dct[\"lengte periode\"] = f\"{round(dt / 86400", "try_run_simulation(datum_jaar, parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False,", "ls=\"--\", color=\"k\") def maandelijke_groei( date: pd.Timestamp, groei: pd.DataFrame, which: Literal[\"huis\", \"aandelen\"] = \"huis\"", "close price adjusted for both dividends and splits. stock_price = df_stock[\"Close*\"].str.replace(\",\", \"\").astype(float) #", "def vergelijkings_tabel(groei: pd.DataFrame): example_periods = [ dict(van=\"2014-Q2\", tot=\"2020-Q4\", notities=\"de recente 'goede' jaren\"), dict(", "-> None: ax.fill_between( x.index, x.values, where=x.values > 0, color=\"green\", alpha=alpha, zorder=-1, ) ax.fill_between(", "beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12, ) axs[1, 0].set_xlabel(\"Aankoop datum\") axs[1, 1].set_xlabel(\"Aankoop datum\")", "0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v.", "betaald = 0 afgelost = 0 belegging = 0 huis_waarde = geleend for", "\" f\"Hadden we een huis gehuurd voor €{huur} per maand en belegd, dan", "df.aankoop_datum = pd.to_datetime(df.aankoop_datum) df[\"verschil\"] = (df.huis_winst - df.belegging) / 1000 df.aantal_jaar = df.aantal_jaar.round()", "onderhoud_pct=onderhoud_pct) hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int,", "axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True) levels = 15 ds.verschil.plot.contourf( ax=axs[0,", "\"aandelen\"] ) return groei def plot_aandelen(groei: pd.DataFrame) -> None: fig, ax = plt.subplots(figsize=(7,", "except 
ValueError: # 'jaar' is niet mogelijk want we kunnen niet in de", "afgelost = 0 belegging = 0 huis_waarde = geleend for date in dates:", "is huis is beter\\nblauw is belegging is beter\", horizontalalignment=\"right\", verticalalignment=\"top\", transform=axs[0, 0].transAxes, fontsize=12,", "data points for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() ->", "het beter om aandelen \" f\"te kopen en in {100-pct_rood:.1f}% is het beter", ": round(jaar_tot_verkoop * 12) + 1 ] if len(dates) < jaar_tot_verkoop * 12:", "hypotheek_kosten = float(afbetaling) + float(rente) rente_betaald[date.year] += float(rente) betaald += hypotheek_kosten belegging +=", "1000, vmax=ds.huis_winst.max() / 1000 ), cbar_kwargs={\"label\": \"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde /", "def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame): \"\"\"WOZ waarde is bepaald aan de", "een huiswaarde van €{huis_waarde/1000:.0f}k, \" f\"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. \" f\"Hadden", "date, groei) hypotheek_aftrek = maandlasten.hypotheek_aftrek( rente_betaald[date.year - 1], woz_waarde ) persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon)", "hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend) betaalschema = hypotheek.monthly_payment_schedule() rente_betaald: Dict[int, float]", "datum\") axs[1, 1].set_xlabel(\"Aankoop datum\") axs[0, 0].set_ylabel(\"Verkoop na (jaar)\") axs[1, 0].set_ylabel(\"Verkoop na (jaar)\") axs[0,", "] ] return table def fill_area(x: pd.Series, ax, alpha: float = 1.0) ->", "quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\") df_stock.Date = pd.to_datetime(df_stock.Date) df_stock.set_index(\"Date\", inplace=True) #", "huis_prijs / 12 kosten = float(hyptotheek_maandelijks) + onderhoud return kosten res = scipy.optimize.minimize(", "\"\"\"Vermogensbelasting vanaf 2021. 
https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3 \"\"\" heffingvrij = 100_000 if met_fiscaal_partner else 50_000 vermogen", "mean(groei.huis) > mean(groei.aandelen) else \"aandelen\" dct[winner] += \" 🏆\" dct[\"verschil (🏠 - 📈)\"]", "na (jaar)\") axs[0, 0].set_xlabel(\"\") axs[0, 1].set_xlabel(\"\") axs[0, 1].set_ylabel(\"\") axs[1, 1].set_ylabel(\"\") plt.show() def plot_result_lines(df:", "np import pandas as pd import scipy.optimize from loky import get_reusable_executor from tqdm.notebook", "df.aankoop_datum.max(), ls=\"--\", color=\"k\", zorder=-1 ) ax.set_xlabel(\"Aankoop datum\") ax.set_ylabel(\"Winst kopen huis t.o.v. beleggen\") ax.set_title(\"Winst", "huis_winst > 0 else \"verlies\" print( f\"We hebben op {aankoop_datum} een huis van", "12 def vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner: bool = True", "is dus €{(belegging - huis_winst)/1000:.0f}k verschil.\" ) return dict( aankoop_datum=aankoop_datum, verkoop_datum=dates[-1], aantal_jaar=aantal_jaar(dates), betaald=betaald,", "/ 1000).plot.contourf( ax=axs[0, 1], add_colorbar=True, levels=levels, cbar_kwargs={\"label\": \"Waarde belegging (x€1000)\"}, ) (ds.huis_winst /", "/ 12 def vermogensbelasting( vermogen: float, schulden: float = 0, met_fiscaal_partner: bool =", "parameters): aankoop_datum, jaar_tot_verkoop = datum_jaar try: return koop_huis_of_beleg( aankoop_datum, jaar_tot_verkoop, groei=groei, verbose=False, **parameters,", "] hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1) df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T df.columns = [\"maandlasten", "<= dct[\"tot\"])].mean() dct[\"huis\"] = f\"{mean(groei.huis):.2f}%\" dct[\"aandelen\"] = f\"{mean(groei.aandelen):.2f}%\" winner = \"huis\" if mean(groei.huis)", "to need to resample it to quarterly like the huizenprijzen df_stock = pd.read_csv(\"sp500.csv\")", "stijging/daling per jaar (%)\", title=\"Huizenprijs verschil vs. 
tijd, bron: CBS\", figsize=(8, 8), color=\"k\",", "tot=\"2020-Q4\", notities=\"van 2008 crisis tot en met nu\"), dict( van=\"1996-Q1\", tot=\"2020-Q4\", notities=\"alle data", "points for each day stock_price = stock_price.resample(\"D\").interpolate() return stock_price def plot_sp500() -> None:", "\"Winst vrkp huis (x€1000)\"}, ) (ds.huis_waarde / 1000).plot.contourf( ax=axs[1, 1], add_colorbar=True, cbar_kwargs={\"label\": \"Huis", "12, jaarinkomen: Number = 90_000, schulden: Number = 20_000, onderhoud_pct: Number = 1,", "0 huis_waarde = geleend for date in dates: huis_waarde *= maandelijke_groei(date, groei, \"huis\")" ]
[ "from_data if email is from Sara, and 1 if email is from Chris", "If you have not obtained the Enron email corpus, run startup.py in the", "from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the", "### use parseOutText to extract the text from the opened email words =", "word_data word_data.append(words) ### append a 0 to from_data if email is from Sara,", "#temp_counter = 0 for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path", "away in pickle files at the end. \"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris", "returns a list of all the words in the vocabulary. How many different", ") from parse_out_email_text import parseOutText \"\"\" Starter code to process the emails from", "run over full dataset #temp_counter += 1 #if temp_counter < 200: try: path", "from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do TfIdf vectorization here \"\"\"", "in sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the", "\"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data,", "the Enron email dataset, which you downloaded/unpacked in Part 0 of the first", "the Enron email corpus, run startup.py in the tools folder. The data is", "open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do", "Part 0 of the first mini-project. If you have not obtained the Enron", "has misleaded the programmer. ### append the text to word_data word_data.append(words) ### append", "the end. 
\"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data =", "\"\"\" Starter code to process the emails from Sara and Chris to extract", "remove any instances of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt =", "a list of all the words in the vocabulary. How many different words", "### temp_counter helps you only look at the first 200 emails in the", "import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter code to", "in pickle files at the end. \"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris =", "'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string, not an iterator. wrong usage", "list so you ### can iterate your modifications quicker #temp_counter = 0 for", "(from_chris) The actual documents are in the Enron email dataset, which you downloaded/unpacked", "the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words)", "documents ready for classification. The list of all the emails from Sara are", "the emails from Sara are in the from_sara list likewise for emails from", "from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ###", "import re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter", "the opened email words = parseOutText(email) ### use str.replace() to remove any instances", "to process the emails from Sara and Chris to extract the features and", "words = re.sub(patt,'',words) #words is a string, not an iterator. wrong usage of", "path in from_person: ### only look at first 200 emails when developing ###", "email corpus, run startup.py in the tools folder. 
The data is stored in", "\"\"\" # Transform the word_data into a tf-idf matrix using the sklearn TfIdf", "str.replace() to remove any instances of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"]", "in from_person: ### only look at first 200 emails when developing ### once", "between words and feature numbers using get_feature_names(), which returns a list of all", "text = text.split(' ') text = [word for word in text if word.lower()", "list of all the emails from Sara are in the from_sara list likewise", "helps you only look at the first 200 emails in the list so", "sw = stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text = [word for", "in the from_sara list likewise for emails from Chris (from_chris) The actual documents", "of emails from Sara and Chris, so running over all of them ###", "from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data = [] ### temp_counter is", "if word.lower() not in sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\"", "word_data) \"\"\" # Transform the word_data into a tf-idf matrix using the sklearn", "from nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text): text = text.split(' ')", "features and get the documents ready for classification. The list of all the", "only look at the first 200 emails in the list so you ###", "in Part 0 of the first mini-project. If you have not obtained the", "startup.py in the tools folder. The data is stored in lists and packed", "Chris to extract the features and get the documents ready for classification. 
The", "### thousands of emails from Sara and Chris, so running over all of", "email is from Sara, and 1 if email is from Chris from_data.append(1) if", "text if word.lower() not in sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data)", "#if temp_counter < 200: try: path = os.path.join('..', path[:-1]) print name, ': ',", "path email = open(path, \"r\") ### use parseOutText to extract the text from", "import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping between", "take a long time ### temp_counter helps you only look at the first", "+= 1 #if temp_counter < 200: try: path = os.path.join('..', path[:-1]) print name,", "data is stored in lists and packed away in pickle files at the", "= [word for word in text if word.lower() not in sw] return '", "access the mapping between words and feature numbers using get_feature_names(), which returns a", "can iterate your modifications quicker #temp_counter = 0 for name, from_person in [(\"sara\",", "not obtained the Enron email corpus, run startup.py in the tools folder. The", "all the words in the vocabulary. How many different words are there? print", "### append a 0 to from_data if email is from Sara, and 1", "parseOutText \"\"\" Starter code to process the emails from Sara and Chris to", "\"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do TfIdf", "text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping between words", "text.split(' ') text = [word for word in text if word.lower() not in", "email.close() except: pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\",", "4, do TfIdf vectorization here \"\"\" #Remove english stopwords from nltk.corpus import stopwords", "and packed away in pickle files at the end. 
\"\"\" from_sara = open(\"from_sara.txt\",", "the tools folder. The data is stored in lists and packed away in", "words and feature numbers using get_feature_names(), which returns a list of all the", "in lists and packed away in pickle files at the end. \"\"\" from_sara", "running over all of them ### can take a long time ### temp_counter", "end. \"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = []", "corpus, run startup.py in the tools folder. The data is stored in lists", "and get the documents ready for classification. The list of all the emails", "a long time ### temp_counter helps you only look at the first 200", "for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person: ###", "get the documents ready for classification. The list of all the emails from", "can take a long time ### temp_counter helps you only look at the", "from the opened email words = parseOutText(email) ### use str.replace() to remove any", "Part 4, do TfIdf vectorization here \"\"\" #Remove english stopwords from nltk.corpus import", "The actual documents are in the Enron email dataset, which you downloaded/unpacked in", "dataset #temp_counter += 1 #if temp_counter < 200: try: path = os.path.join('..', path[:-1])", "use str.replace() to remove any instances of the words ### [\"sara\", \"shackleton\", \"chris\",", "open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data = [] ###", "of them ### can take a long time ### temp_counter helps you only", "and feature numbers using get_feature_names(), which returns a list of all the words", "not in sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform", "### temp_counter is a way to speed up the development--there are ### thousands", "200 emails in the list so you ### can iterate your modifications quicker", "long time ### temp_counter helps you only look at the first 
200 emails", "documents are in the Enron email dataset, which you downloaded/unpacked in Part 0", "emails from Chris (from_chris) The actual documents are in the Enron email dataset,", "[word for word in text if word.lower() not in sw] return ' '.join(text)", "mini-project. If you have not obtained the Enron email corpus, run startup.py in", "append the text to word_data word_data.append(words) ### append a 0 to from_data if", "sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter code to process", "= stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text = [word for word", "in the list so you ### can iterate your modifications quicker #temp_counter =", "your modifications quicker #temp_counter = 0 for name, from_person in [(\"sara\", from_sara), (\"chris\",", "you downloaded/unpacked in Part 0 of the first mini-project. If you have not", "[\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a", "The list of all the emails from Sara are in the from_sara list", "\"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data", "the emails from Sara and Chris to extract the features and get the", "< 200: try: path = os.path.join('..', path[:-1]) print name, ': ', path email", "print name, ': ', path email = open(path, \"r\") ### use parseOutText to", "of plural here has misleaded the programmer. 
### append the text to word_data", "have not obtained the Enron email corpus, run startup.py in the tools folder.", "to remove any instances of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt", "\"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string,", "words = parseOutText(email) ### use str.replace() to remove any instances of the words", "= open(path, \"r\") ### use parseOutText to extract the text from the opened", "email words = parseOutText(email) ### use str.replace() to remove any instances of the", "= text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping between words and feature", "import os import pickle import re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text", "[(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person: ### only look at first", "text = [word for word in text if word.lower() not in sw] return", "200 emails when developing ### once everything is working, remove this line to", "parseOutText(email) ### use str.replace() to remove any instances of the words ### [\"sara\",", "any instances of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf'", "is a way to speed up the development--there are ### thousands of emails", "open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do TfIdf vectorization here \"\"\" #Remove", "a tf-idf matrix using the sklearn TfIdf transformation. 
from sklearn.feature_extraction import text word_matrix", "process the emails from Sara and Chris to extract the features and get", "so running over all of them ### can take a long time ###", "word_matrix.fit(word_data) # You can access the mapping between words and feature numbers using", "the text from the opened email words = parseOutText(email) ### use str.replace() to", "sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter code to process the", "word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping between words and", "for path in from_person: ### only look at first 200 emails when developing", "TfIdf transformation. from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can", "mapping between words and feature numbers using get_feature_names(), which returns a list of", "way to speed up the development--there are ### thousands of emails from Sara", "### can iterate your modifications quicker #temp_counter = 0 for name, from_person in", "at first 200 emails when developing ### once everything is working, remove this", "\"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string, not an", "from Chris from_data.append(1) if name == 'chris' else from_data.append(0) email.close() except: pass print", "200: try: path = os.path.join('..', path[:-1]) print name, ': ', path email =", "text from the opened email words = parseOutText(email) ### use str.replace() to remove", "code to process the emails from Sara and Chris to extract the features", "packed away in pickle files at the end. 
\"\"\" from_sara = open(\"from_sara.txt\", \"r\")", "look at the first 200 emails in the list so you ### can", "patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string, not an iterator.", "re.sub(patt,'',words) #words is a string, not an iterator. wrong usage of plural here", "is a string, not an iterator. wrong usage of plural here has misleaded", "get_feature_names(), which returns a list of all the words in the vocabulary. How", "to from_data if email is from Sara, and 1 if email is from", "0 for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person:", "[] word_data = [] ### temp_counter is a way to speed up the", "string, not an iterator. wrong usage of plural here has misleaded the programmer.", "\"r\") from_data = [] word_data = [] ### temp_counter is a way to", "only look at first 200 emails when developing ### once everything is working,", "append a 0 to from_data if email is from Sara, and 1 if", "#!/usr/bin/python import os import pickle import re import sys sys.path.append( \"../tools/\" ) from", "downloaded/unpacked in Part 0 of the first mini-project. 
If you have not obtained", "for emails from Chris (from_chris) The actual documents are in the Enron email", "are ### thousands of emails from Sara and Chris, so running over all", "temp_counter < 200: try: path = os.path.join('..', path[:-1]) print name, ': ', path", "word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") )", "development--there are ### thousands of emails from Sara and Chris, so running over", "print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\")", "\"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data = [] ### temp_counter", "modifications quicker #temp_counter = 0 for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]:", "TfIdf vectorization here \"\"\" #Remove english stopwords from nltk.corpus import stopwords sw =", "the word_data into a tf-idf matrix using the sklearn TfIdf transformation. from sklearn.feature_extraction", "can access the mapping between words and feature numbers using get_feature_names(), which returns", "\"r\") ### use parseOutText to extract the text from the opened email words", "in text if word.lower() not in sw] return ' '.join(text) word_data2 = map(remove_stopwords,", "except: pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\")", "of the first mini-project. If you have not obtained the Enron email corpus,", "stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text = [word for word in", "not an iterator. wrong usage of plural here has misleaded the programmer. ###", "extract the features and get the documents ready for classification. 
The list of", "The data is stored in lists and packed away in pickle files at", "instances of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words", "sklearn TfIdf transformation. from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You", "first mini-project. If you have not obtained the Enron email corpus, run startup.py", "= parseOutText(email) ### use str.replace() to remove any instances of the words ###", "and Chris, so running over all of them ### can take a long", "#words is a string, not an iterator. wrong usage of plural here has", "dataset, which you downloaded/unpacked in Part 0 of the first mini-project. If you", "from Sara and Chris, so running over all of them ### can take", "### only look at first 200 emails when developing ### once everything is", "iterate your modifications quicker #temp_counter = 0 for name, from_person in [(\"sara\", from_sara),", "email dataset, which you downloaded/unpacked in Part 0 of the first mini-project. If", "path[:-1]) print name, ': ', path email = open(path, \"r\") ### use parseOutText", "word_data = [] ### temp_counter is a way to speed up the development--there", "extract the text from the opened email words = parseOutText(email) ### use str.replace()", "which returns a list of all the words in the vocabulary. 
How many", "os import pickle import re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import", "in the Enron email dataset, which you downloaded/unpacked in Part 0 of the", "text to word_data word_data.append(words) ### append a 0 to from_data if email is", "up the development--there are ### thousands of emails from Sara and Chris, so", "quicker #temp_counter = 0 for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for", "pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") )", "open(path, \"r\") ### use parseOutText to extract the text from the opened email", "\"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter code to process the emails", "import parseOutText \"\"\" Starter code to process the emails from Sara and Chris", "(\"chris\", from_chris)]: for path in from_person: ### only look at first 200 emails", "plural here has misleaded the programmer. ### append the text to word_data word_data.append(words)", "stored in lists and packed away in pickle files at the end. \"\"\"", "= os.path.join('..', path[:-1]) print name, ': ', path email = open(path, \"r\") ###", "the sklearn TfIdf transformation. from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) #", "this line to run over full dataset #temp_counter += 1 #if temp_counter <", "Sara are in the from_sara list likewise for emails from Chris (from_chris) The", "1 #if temp_counter < 200: try: path = os.path.join('..', path[:-1]) print name, ':", "[] ### temp_counter is a way to speed up the development--there are ###", "== 'chris' else from_data.append(0) email.close() except: pass print \"emails processed\" print word_data[152] from_sara.close()", "is working, remove this line to run over full dataset #temp_counter += 1", "a string, not an iterator. 
wrong usage of plural here has misleaded the", "Sara, and 1 if email is from Chris from_data.append(1) if name == 'chris'", "when developing ### once everything is working, remove this line to run over", "from Sara, and 1 if email is from Chris from_data.append(1) if name ==", "transformation. from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access", "Enron email dataset, which you downloaded/unpacked in Part 0 of the first mini-project.", "once everything is working, remove this line to run over full dataset #temp_counter", "import stopwords sw = stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text =", "print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump(", "emails from Sara and Chris to extract the features and get the documents", "words in the vocabulary. How many different words are there? print len(word_matrix.get_feature_names()) #import", "here \"\"\" #Remove english stopwords from nltk.corpus import stopwords sw = stopwords.words('english') def", "= re.sub(patt,'',words) #words is a string, not an iterator. wrong usage of plural", "word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the word_data into a tf-idf matrix", "map(remove_stopwords, word_data) \"\"\" # Transform the word_data into a tf-idf matrix using the", "\"\"\" #Remove english stopwords from nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text):", "in the vocabulary. How many different words are there? 
print len(word_matrix.get_feature_names()) #import pdb;pdb.set_trace()", "do TfIdf vectorization here \"\"\" #Remove english stopwords from nltk.corpus import stopwords sw", "pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do TfIdf vectorization here", "pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part", "word in text if word.lower() not in sw] return ' '.join(text) word_data2 =", "remove_stopwords(text): text = text.split(' ') text = [word for word in text if", "look at first 200 emails when developing ### once everything is working, remove", "numbers using get_feature_names(), which returns a list of all the words in the", "for word in text if word.lower() not in sw] return ' '.join(text) word_data2", "'.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the word_data into a tf-idf", "### append the text to word_data word_data.append(words) ### append a 0 to from_data", "', path email = open(path, \"r\") ### use parseOutText to extract the text", "speed up the development--there are ### thousands of emails from Sara and Chris,", "you have not obtained the Enron email corpus, run startup.py in the tools", "in Part 4, do TfIdf vectorization here \"\"\" #Remove english stopwords from nltk.corpus", "the from_sara list likewise for emails from Chris (from_chris) The actual documents are", "temp_counter is a way to speed up the development--there are ### thousands of", "= text.split(' ') text = [word for word in text if word.lower() not", "the development--there are ### thousands of emails from Sara and Chris, so running", "Transform the word_data into a tf-idf matrix using the sklearn TfIdf transformation. 
from", "#Remove english stopwords from nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text): text", "over full dataset #temp_counter += 1 #if temp_counter < 200: try: path =", "working, remove this line to run over full dataset #temp_counter += 1 #if", "vectorization here \"\"\" #Remove english stopwords from nltk.corpus import stopwords sw = stopwords.words('english')", "= [] ### temp_counter is a way to speed up the development--there are", "word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4,", "from_data.append(0) email.close() except: pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data,", "tools folder. The data is stored in lists and packed away in pickle", "open(\"from_chris.txt\", \"r\") from_data = [] word_data = [] ### temp_counter is a way", "sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping", "in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person: ### only look at", "from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data =", "pickle files at the end. 
\"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\",", ") pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in Part 4, do TfIdf vectorization", "else from_data.append(0) email.close() except: pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump(", "\"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string, not", "Chris (from_chris) The actual documents are in the Enron email dataset, which you", "### can take a long time ### temp_counter helps you only look at", "the words in the vocabulary. How many different words are there? print len(word_matrix.get_feature_names())", "here has misleaded the programmer. ### append the text to word_data word_data.append(words) ###", "the first 200 emails in the list so you ### can iterate your", "and 1 if email is from Chris from_data.append(1) if name == 'chris' else", "0 of the first mini-project. If you have not obtained the Enron email", "you only look at the first 200 emails in the list so you", "files at the end. \"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\")", "over all of them ### can take a long time ### temp_counter helps", "iterator. wrong usage of plural here has misleaded the programmer. ### append the", "of the words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words =", "the first mini-project. 
If you have not obtained the Enron email corpus, run", "a way to speed up the development--there are ### thousands of emails from", "list likewise for emails from Chris (from_chris) The actual documents are in the", "from Sara are in the from_sara list likewise for emails from Chris (from_chris)", "if name == 'chris' else from_data.append(0) email.close() except: pass print \"emails processed\" print", "from Chris (from_chris) The actual documents are in the Enron email dataset, which", "using get_feature_names(), which returns a list of all the words in the vocabulary.", "to speed up the development--there are ### thousands of emails from Sara and", ") ### in Part 4, do TfIdf vectorization here \"\"\" #Remove english stopwords", "### in Part 4, do TfIdf vectorization here \"\"\" #Remove english stopwords from", "everything is working, remove this line to run over full dataset #temp_counter +=", "= map(remove_stopwords, word_data) \"\"\" # Transform the word_data into a tf-idf matrix using", "': ', path email = open(path, \"r\") ### use parseOutText to extract the", "likewise for emails from Chris (from_chris) The actual documents are in the Enron", "Sara and Chris to extract the features and get the documents ready for", "\"w\") ) ### in Part 4, do TfIdf vectorization here \"\"\" #Remove english", "') text = [word for word in text if word.lower() not in sw]", "classification. The list of all the emails from Sara are in the from_sara", "at the end. \"\"\" from_sara = open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data", "of all the words in the vocabulary. How many different words are there?", "the documents ready for classification. The list of all the emails from Sara", "matrix using the sklearn TfIdf transformation. 
from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english')", "a 0 to from_data if email is from Sara, and 1 if email", "parseOutText to extract the text from the opened email words = parseOutText(email) ###", "text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data) # You can access the mapping between words and feature numbers", "emails when developing ### once everything is working, remove this line to run", "email is from Chris from_data.append(1) if name == 'chris' else from_data.append(0) email.close() except:", "word_data.append(words) ### append a 0 to from_data if email is from Sara, and", "Sara and Chris, so running over all of them ### can take a", "1 if email is from Chris from_data.append(1) if name == 'chris' else from_data.append(0)", "temp_counter helps you only look at the first 200 emails in the list", "in the tools folder. The data is stored in lists and packed away", "thousands of emails from Sara and Chris, so running over all of them", "from_data.append(1) if name == 'chris' else from_data.append(0) email.close() except: pass print \"emails processed\"", "which you downloaded/unpacked in Part 0 of the first mini-project. 
If you have", "word.lower() not in sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" #", "path = os.path.join('..', path[:-1]) print name, ': ', path email = open(path, \"r\")", "name == 'chris' else from_data.append(0) email.close() except: pass print \"emails processed\" print word_data[152]", "from_person: ### only look at first 200 emails when developing ### once everything", "<reponame>lucasosouza/udacity-data-analysis #!/usr/bin/python import os import pickle import re import sys sys.path.append( \"../tools/\" )", "os.path.join('..', path[:-1]) print name, ': ', path email = open(path, \"r\") ### use", "from_sara), (\"chris\", from_chris)]: for path in from_person: ### only look at first 200", "wrong usage of plural here has misleaded the programmer. ### append the text", "email = open(path, \"r\") ### use parseOutText to extract the text from the", "nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text", "to word_data word_data.append(words) ### append a 0 to from_data if email is from", "obtained the Enron email corpus, run startup.py in the tools folder. The data", "processed\" print word_data[152] from_sara.close() from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\",", "is from Chris from_data.append(1) if name == 'chris' else from_data.append(0) email.close() except: pass", "from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person: ### only look", "is from Sara, and 1 if email is from Chris from_data.append(1) if name", "are in the Enron email dataset, which you downloaded/unpacked in Part 0 of", "feature numbers using get_feature_names(), which returns a list of all the words in", "at the first 200 emails in the list so you ### can iterate", "to extract the features and get the documents ready for classification. 
The list", "all of them ### can take a long time ### temp_counter helps you", "#temp_counter += 1 #if temp_counter < 200: try: path = os.path.join('..', path[:-1]) print", "an iterator. wrong usage of plural here has misleaded the programmer. ### append", "### once everything is working, remove this line to run over full dataset", "remove this line to run over full dataset #temp_counter += 1 #if temp_counter", "first 200 emails when developing ### once everything is working, remove this line", "= open(\"from_chris.txt\", \"r\") from_data = [] word_data = [] ### temp_counter is a", "from parse_out_email_text import parseOutText \"\"\" Starter code to process the emails from Sara", "try: path = os.path.join('..', path[:-1]) print name, ': ', path email = open(path,", "from Sara and Chris to extract the features and get the documents ready", "of all the emails from Sara are in the from_sara list likewise for", "from_chris)]: for path in from_person: ### only look at first 200 emails when", "Enron email corpus, run startup.py in the tools folder. The data is stored", "### use str.replace() to remove any instances of the words ### [\"sara\", \"shackleton\",", "the programmer. ### append the text to word_data word_data.append(words) ### append a 0", "' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the word_data into a", "the list so you ### can iterate your modifications quicker #temp_counter = 0", "list of all the words in the vocabulary. How many different words are", "tf-idf matrix using the sklearn TfIdf transformation. from sklearn.feature_extraction import text word_matrix =", "parse_out_email_text import parseOutText \"\"\" Starter code to process the emails from Sara and", "opened email words = parseOutText(email) ### use str.replace() to remove any instances of", "into a tf-idf matrix using the sklearn TfIdf transformation. 
from sklearn.feature_extraction import text", "'chris' else from_data.append(0) email.close() except: pass print \"emails processed\" print word_data[152] from_sara.close() from_chris.close()", "= 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is a string, not an iterator. wrong", "# Transform the word_data into a tf-idf matrix using the sklearn TfIdf transformation.", "Starter code to process the emails from Sara and Chris to extract the", "word_data into a tf-idf matrix using the sklearn TfIdf transformation. from sklearn.feature_extraction import", "# You can access the mapping between words and feature numbers using get_feature_names(),", "folder. The data is stored in lists and packed away in pickle files", "0 to from_data if email is from Sara, and 1 if email is", "= 0 for name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in", "You can access the mapping between words and feature numbers using get_feature_names(), which", "Chris from_data.append(1) if name == 'chris' else from_data.append(0) email.close() except: pass print \"emails", "you ### can iterate your modifications quicker #temp_counter = 0 for name, from_person", "to run over full dataset #temp_counter += 1 #if temp_counter < 200: try:", "all the emails from Sara are in the from_sara list likewise for emails", "from_data = [] word_data = [] ### temp_counter is a way to speed", "def remove_stopwords(text): text = text.split(' ') text = [word for word in text", "misleaded the programmer. 
### append the text to word_data word_data.append(words) ### append a", "the text to word_data word_data.append(words) ### append a 0 to from_data if email", "pickle import re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\"", "sw] return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the word_data", "name, ': ', path email = open(path, \"r\") ### use parseOutText to extract", "words ### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words", "from_chris.close() pickle.dump( word_data, open(\"your_word_data.pkl\", \"w\") ) pickle.dump( from_data, open(\"your_email_authors.pkl\", \"w\") ) ### in", "for classification. The list of all the emails from Sara are in the", "usage of plural here has misleaded the programmer. ### append the text to", "are in the from_sara list likewise for emails from Chris (from_chris) The actual", "developing ### once everything is working, remove this line to run over full", "lists and packed away in pickle files at the end. 
\"\"\" from_sara =", "emails from Sara are in the from_sara list likewise for emails from Chris", "english stopwords from nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text): text =", "emails in the list so you ### can iterate your modifications quicker #temp_counter", "the mapping between words and feature numbers using get_feature_names(), which returns a list", "Chris, so running over all of them ### can take a long time", "first 200 emails in the list so you ### can iterate your modifications", "time ### temp_counter helps you only look at the first 200 emails in", "so you ### can iterate your modifications quicker #temp_counter = 0 for name,", "emails from Sara and Chris, so running over all of them ### can", "line to run over full dataset #temp_counter += 1 #if temp_counter < 200:", "actual documents are in the Enron email dataset, which you downloaded/unpacked in Part", "to extract the text from the opened email words = parseOutText(email) ### use", "re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText \"\"\" Starter code", "from_sara list likewise for emails from Chris (from_chris) The actual documents are in", "import pickle import re import sys sys.path.append( \"../tools/\" ) from parse_out_email_text import parseOutText", "full dataset #temp_counter += 1 #if temp_counter < 200: try: path = os.path.join('..',", "ready for classification. The list of all the emails from Sara are in", "if email is from Sara, and 1 if email is from Chris from_data.append(1)", "return ' '.join(text) word_data2 = map(remove_stopwords, word_data) \"\"\" # Transform the word_data into", "using the sklearn TfIdf transformation. from sklearn.feature_extraction import text word_matrix = text.TfidfVectorizer(stop_words='english') word_matrix.fit(word_data)", "= [] word_data = [] ### temp_counter is a way to speed up", "programmer. 
### append the text to word_data word_data.append(words) ### append a 0 to", "and Chris to extract the features and get the documents ready for classification.", "= open(\"from_sara.txt\", \"r\") from_chris = open(\"from_chris.txt\", \"r\") from_data = [] word_data = []", "name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]: for path in from_person: ### only", "stopwords sw = stopwords.words('english') def remove_stopwords(text): text = text.split(' ') text = [word", "is stored in lists and packed away in pickle files at the end.", "run startup.py in the tools folder. The data is stored in lists and", "stopwords from nltk.corpus import stopwords sw = stopwords.words('english') def remove_stopwords(text): text = text.split('", "use parseOutText to extract the text from the opened email words = parseOutText(email)", "if email is from Chris from_data.append(1) if name == 'chris' else from_data.append(0) email.close()", "the features and get the documents ready for classification. The list of all", "them ### can take a long time ### temp_counter helps you only look", "### [\"sara\", \"shackleton\", \"chris\", \"germani\"] patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf' words = re.sub(patt,'',words) #words is" ]
[ "<reponame>felix781/market-access-python-frontend def public_view(func): \"\"\" Decorator for public views that do not require authentication", "for public views that do not require authentication \"\"\" orig_func = func orig_func._public_view", "do not require authentication \"\"\" orig_func = func orig_func._public_view = True return func", "def public_view(func): \"\"\" Decorator for public views that do not require authentication \"\"\"", "Decorator for public views that do not require authentication \"\"\" orig_func = func", "that do not require authentication \"\"\" orig_func = func orig_func._public_view = True return", "public views that do not require authentication \"\"\" orig_func = func orig_func._public_view =", "views that do not require authentication \"\"\" orig_func = func orig_func._public_view = True", "public_view(func): \"\"\" Decorator for public views that do not require authentication \"\"\" orig_func", "\"\"\" Decorator for public views that do not require authentication \"\"\" orig_func =" ]
[ "[float(x) for x in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for ind,", "in enumerate(parts): if part > max_val: max_val = part max_ind = ind fo.write(\"ex\"", "parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0]", "= ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind] + \"\\n\") counter +=", "counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts", "max_ind = ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind] + \"\\n\") counter", "fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in fi:", "for line in fi: parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind =", "part > max_val: max_val = part max_ind = ind fo.write(\"ex\" + str(counter) +", "max_ind = 0 max_val = parts[0] for ind, part in enumerate(parts): if part", "\"test_results.tsv\", \"r\") fo = open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter =", "\"/\" + \"test_results.tsv\", \"r\") fo = open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\")", "line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for ind, part in enumerate(parts): if", "0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts = [float(x)", "import sys prefix = sys.argv[1] fi = open(prefix + \"/\" + \"test_results.tsv\", \"r\")", "prefix = sys.argv[1] fi = open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo =", "line in fi: parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind = 0", "\"entailment\", \"neutral\"] for line in fi: parts = [float(x) for x in line.strip().split(\"\\t\")]", "for ind, part in enumerate(parts): if part > max_val: max_val = part max_ind", "part in enumerate(parts): if part > max_val: max_val = part max_ind = ind", "part max_ind = ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind] + \"\\n\")", "= sys.argv[1] fi = 
open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix", "fi: parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind = 0 max_val =", "= 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts =", "for x in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for ind, part", "in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for ind, part in enumerate(parts):", "enumerate(parts): if part > max_val: max_val = part max_ind = ind fo.write(\"ex\" +", "+ \"test_results.tsv\", \"r\") fo = open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter", "max_val: max_val = part max_ind = ind fo.write(\"ex\" + str(counter) + \",\" +", "= [\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts = [float(x) for x", "sys prefix = sys.argv[1] fi = open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo", "open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\",", "sys.argv[1] fi = open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix +", "max_val = part max_ind = ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind]", "\"r\") fo = open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0", "fi = open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix + \"/\"", "if part > max_val: max_val = part max_ind = ind fo.write(\"ex\" + str(counter)", "ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind] + \"\\n\") counter += 1", "+ \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for", "[\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts = [float(x) for x in", "labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in fi: parts = [float(x) for", "= [float(x) for x in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for", "= part 
max_ind = ind fo.write(\"ex\" + str(counter) + \",\" + labels[max_ind] +", "in fi: parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind = 0 max_val", "+ \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix + \"/\" + \"preds.txt\", \"w\")", "x in line.strip().split(\"\\t\")] max_ind = 0 max_val = parts[0] for ind, part in", "max_val = parts[0] for ind, part in enumerate(parts): if part > max_val: max_val", "ind, part in enumerate(parts): if part > max_val: max_val = part max_ind =", "> max_val: max_val = part max_ind = ind fo.write(\"ex\" + str(counter) + \",\"", "\"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"]", "\"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line in", "0 max_val = parts[0] for ind, part in enumerate(parts): if part > max_val:", "= open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix + \"/\" +", "+ \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\",", "= open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels =", "\"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels = [\"contradiction\", \"entailment\", \"neutral\"] for line", "= 0 max_val = parts[0] for ind, part in enumerate(parts): if part >", "= parts[0] for ind, part in enumerate(parts): if part > max_val: max_val =", "open(prefix + \"/\" + \"test_results.tsv\", \"r\") fo = open(prefix + \"/\" + \"preds.txt\",", "parts[0] for ind, part in enumerate(parts): if part > max_val: max_val = part", "fo = open(prefix + \"/\" + \"preds.txt\", \"w\") fo.write(\"pairID,gold_label\\n\") counter = 0 labels", "\"neutral\"] for line in fi: parts = [float(x) for x in line.strip().split(\"\\t\")] max_ind" ]
[ "def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self):", "\"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def", "test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response", "response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def", "= self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\")", "from django.http import HttpRequest from django.test import SimpleTestCase from django.urls import reverse from", "200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\"))", "response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains(", "self.client.get(\"/\") self.assertNotContains( response, \"Hi there! I should not be on the page.\" )", "HttpRequest from django.test import SimpleTestCase from django.urls import reverse from .. import views", "from .. 
import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200)", "def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code,", "reverse from .. import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code,", "import HttpRequest from django.test import SimpleTestCase from django.urls import reverse from .. import", "def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code,", "from django.test import SimpleTestCase from django.urls import reverse from .. 
import views class", "200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response", "test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\")", "200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>'", "self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' )", "self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains(", ".. 
import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def", "self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response,", "test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200)", "'<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there!", "self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response =", "def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response =", "Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! I should", "import SimpleTestCase from django.urls import reverse from .. 
import views class HomePageTests(SimpleTestCase): def", "= self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\")", "self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster", "import reverse from .. import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\")", "response = self.client.get(\"/\") self.assertContains( response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response =", "def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! I should not be", "test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! 
I should not be on", "response, '<h1 class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi", "response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response,", "class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response =", "class=\"display-4\">Roster Wizard</h1>' ) def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! I", "= self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response,", "self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response", "self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self): response = self.client.get(\"/\") self.assertContains( response, '<h1", "= self.client.get(\"/\") self.assertNotContains( response, \"Hi there! 
I should not be on the page.\"", "self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def test_home_page_contains_correct_html(self):", "response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! I should not be on the", "from django.urls import reverse from .. import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response", "django.test import SimpleTestCase from django.urls import reverse from .. import views class HomePageTests(SimpleTestCase):", "import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self):", "django.http import HttpRequest from django.test import SimpleTestCase from django.urls import reverse from ..", "views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response", "HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response = self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\"))", ") def test_home_page_does_not_contain_incorrect_html(self): response = self.client.get(\"/\") self.assertNotContains( response, \"Hi there! 
I should not", "test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200)", "= self.client.get(\"/\") self.assertEqual(response.status_code, 200) def test_view_url_by_name(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self):", "SimpleTestCase from django.urls import reverse from .. import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self):", "self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse(\"home\")) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, \"home.html\") def", "django.urls import reverse from .. import views class HomePageTests(SimpleTestCase): def test_home_page_status_code(self): response =" ]
[ "Unless required by applicable law or agreed to in writing, software # distributed", "Returns np array of inverse uniform CDF values at pts in x_grid '''", "np array of inverse uniform CDF values at pts in x_grid ''' return", "''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central moment for", "= np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central moment for i in", "x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples", "to equal mean +/- 4stds self._dim = 1 self._mins = [min_val] self._maxs =", "self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled yet\") return", "self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return self._dim", "be less than maximum value\") self._minimum_value = min_val self._range_size = max_val - min_val", "points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): '''", "# the National Aeronautics and Space Administration. No copyright is claimed in #", "the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed", "max_moment def get_dim(self): return self._dim def get_variance(self): ''' Returns variance of uniform random", "limitations # under the License. ''' Class for defining a uniform random variable", "Space Administration. No copyright is claimed in # the United States under Title", "moments up to order 'max_order' in numpy array. 
''' #TODO - calculate moments", "scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate & store moments to retrieve", "generate_moments(self, max_moment): ''' Calculate & store moments to retrieve more efficiently later '''", "Version 2.0 (the \"License\"); you may not use this # file except in", "at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to in writing,", "scipy uniform rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def", "under Title 17, U.S. Code. All Other Rights Reserved. # The Stochastic Reduced", "max_moment on the fly & append to stored if max_order <= self._max_moment: moments", "compliance with the License. You may obtain a copy of the # License", "provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples. Caches moments up to", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "''' #TODO - calculate moments above max_moment on the fly & append to", "represented by the Administrator of # the National Aeronautics and Space Administration. No", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "Other Rights Reserved. # The Stochastic Reduced Order Models with Python (SROMPy) platform", "17, U.S. Code. All Other Rights Reserved. # The Stochastic Reduced Order Models", "self._max_moment = max_moment def get_dim(self): return self._dim def get_variance(self): ''' Returns variance of", "1)) #Rely on scipy.stats to return non-central moment for i in range(max_moment): self._moments[i]", "implied. 
See the # License for the specific language governing permissions and limitations", "max_order <= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled", "licensed # under the Apache License, Version 2.0 (the \"License\"); you may not", "random variable. Returns numpy array of length 'sample_size' containing these samples ''' #Use", "''' import numpy as np from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable", "License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to in", "array of length 'sample_size' containing these samples ''' #Use scipy uniform rv to", "max_val: raise ValueError(\"Minimum value must be less than maximum value\") self._minimum_value = min_val", "must be less than maximum value\") self._minimum_value = min_val self._range_size = max_val -", "<= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled yet\")", "1 self._mins = [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment", "fly & append to stored if max_order <= self._max_moment: moments = self._moments[:max_order] else:", "statistics/samples. Caches moments up to max_moment for speedup. ''' if min_val >= max_val:", "self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate & store moments to retrieve more", "KIND, either express or implied. See the # License for the specific language", "''' Draws random samples from the uniform random variable. 
Returns numpy array of", "= [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return self._dim def", "contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws", "self._mins = [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def", "samples from the uniform random variable. Returns numpy array of length 'sample_size' containing", "variable ''' import numpy as np from scipy.stats import uniform as scipyuniform from", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns", "Administrator of # the National Aeronautics and Space Administration. 
No copyright is claimed", "return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array of uniform", "efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central", "= self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled yet\") return moments def", "raise ValueError(\"Minimum value must be less than maximum value\") self._minimum_value = min_val self._range_size", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "retrieve more efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to", "min_val #set dimension (scalar), min/max to equal mean +/- 4stds self._dim = 1", "# Copyright 2018 United States Government as represented by the Administrator of #", "def compute_inv_CDF(self, x_grid): ''' Returns np array of inverse uniform CDF values at", "x_grid): ''' Returns np array of inverse uniform CDF values at pts in", "variable with provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples. Caches moments", "the # License for the specific language governing permissions and limitations # under", "= min_val self._range_size = max_val - min_val #set dimension (scalar), min/max to equal", "max_val - min_val #set dimension (scalar), min/max to equal mean +/- 4stds self._dim", "scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform", "the License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0.", "array of uniform CDF values at the points contained in x_grid ''' return", "of uniform random variable ''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments", "up to order 'max_order' in numpy array. 
''' #TODO - calculate moments above", "return moments def compute_CDF(self, x_grid): ''' Returns numpy array of uniform CDF values", "SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random variable", "Reserved. # The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed", "numpy array. ''' #TODO - calculate moments above max_moment on the fly &", "required by applicable law or agreed to in writing, software # distributed under", "to stored if max_order <= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above", "Copyright 2018 United States Government as represented by the Administrator of # the", "applicable law or agreed to in writing, software # distributed under the License", "above max_moment not handled yet\") return moments def compute_CDF(self, x_grid): ''' Returns numpy", "def get_dim(self): return self._dim def get_variance(self): ''' Returns variance of uniform random variable", "or agreed to in writing, software # distributed under the License is distributed", "License, Version 2.0 (the \"License\"); you may not use this # file except", "variable. Returns numpy array of length 'sample_size' containing these samples ''' #Use scipy", "order 'max_order' in numpy array. ''' #TODO - calculate moments above max_moment on", "# under the License. ''' Class for defining a uniform random variable '''", "with Python (SROMPy) platform is licensed # under the Apache License, Version 2.0", "CDF values at the points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size)", "values at the points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def", "speedup. ''' if min_val >= max_val: raise ValueError(\"Minimum value must be less than", "claimed in # the United States under Title 17, U.S. Code. 
All Other", "in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array of inverse uniform CDF", "Stochastic Reduced Order Models with Python (SROMPy) platform is licensed # under the", "return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate", "2.0 (the \"License\"); you may not use this # file except in compliance", "a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable", "the License. ''' Class for defining a uniform random variable ''' import numpy", "in # the United States under Title 17, U.S. Code. All Other Rights", "''' Class for defining a uniform random variable ''' import numpy as np", "variable ''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up to order", "Models with Python (SROMPy) platform is licensed # under the Apache License, Version", "than maximum value\") self._minimum_value = min_val self._range_size = max_val - min_val #set dimension", "def generate_moments(self, max_moment): ''' Calculate & store moments to retrieve more efficiently later", "to return non-central moment for i in range(max_moment): self._moments[i] = scipyuniform.moment(i+1, self._minimum_value, self._range_size)", "[max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return self._dim def get_variance(self):", "governing permissions and limitations # under the License. 
''' Class for defining a", "Returns numpy array of length 'sample_size' containing these samples ''' #Use scipy uniform", "x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array", "numpy as np from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable", "(the \"License\"); you may not use this # file except in compliance with", "''' #Use scipy uniform rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size,", "+/- 4stds self._dim = 1 self._mins = [min_val] self._maxs = [max_val] #cache moments", "in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random", "array of uniform pdf values at the points contained in x_grid ''' return", "under the License. ''' Class for defining a uniform random variable ''' import", "get_dim(self): return self._dim def get_variance(self): ''' Returns variance of uniform random variable '''", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "Rights Reserved. # The Stochastic Reduced Order Models with Python (SROMPy) platform is", "No copyright is claimed in # the United States under Title 17, U.S.", "Draws random samples from the uniform random variable. Returns numpy array of length", "values. Implementation wraps scipy.stats.uniform to get statistics/samples. Caches moments up to max_moment for", "random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian)", "National Aeronautics and Space Administration. No copyright is claimed in # the United", "more efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to return", "max_order): ''' Returns moments up to order 'max_order' in numpy array. 
''' #TODO", "Implementation wraps scipy.stats.uniform to get statistics/samples. Caches moments up to max_moment for speedup.", "self._range_size = max_val - min_val #set dimension (scalar), min/max to equal mean +/-", "United States Government as represented by the Administrator of # the National Aeronautics", "max_moment not handled yet\") return moments def compute_CDF(self, x_grid): ''' Returns numpy array", "uniform random variable. Returns numpy array of length 'sample_size' containing these samples '''", "copyright is claimed in # the United States under Title 17, U.S. Code.", "max_moment=10): ''' Initialize the uniform (gaussian) random variable with provided minimum/maximum values. Implementation", "''' Returns np array of inverse uniform CDF values at pts in x_grid", "length 'sample_size' containing these samples ''' #Use scipy uniform rv to return shifted/scaled", "automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate & store moments", "values at the points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def", "import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random variable '''", "to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): '''", "a uniform random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the", "agreed to in writing, software # distributed under the License is distributed on", "in numpy array. ''' #TODO - calculate moments above max_moment on the fly", "max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random variable with provided minimum/maximum values.", "Initialize the uniform (gaussian) random variable with provided minimum/maximum values. 
Implementation wraps scipy.stats.uniform", "moments up to max_moment for speedup. ''' if min_val >= max_val: raise ValueError(\"Minimum", "Calculate & store moments to retrieve more efficiently later ''' self._moments = np.zeros((max_moment,", "is claimed in # the United States under Title 17, U.S. Code. All", "You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless", "= max_moment def get_dim(self): return self._dim def get_variance(self): ''' Returns variance of uniform", "& store moments to retrieve more efficiently later ''' self._moments = np.zeros((max_moment, 1))", "(gaussian) random variable with provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples.", "def compute_moments(self, max_order): ''' Returns moments up to order 'max_order' in numpy array.", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "NotImplementedError(\"Moment above max_moment not handled yet\") return moments def compute_CDF(self, x_grid): ''' Returns", "return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array of inverse", "scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array of inverse uniform", "compute_pdf(self, x_grid): ''' Returns numpy array of uniform pdf values at the points", "uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "later ''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central moment", "the Apache License, Version 2.0 (the \"License\"); you may not use this #", "moments self.generate_moments(max_moment) self._max_moment = max_moment def 
get_dim(self): return self._dim def get_variance(self): ''' Returns", "2018 United States Government as represented by the Administrator of # the National", "All Other Rights Reserved. # The Stochastic Reduced Order Models with Python (SROMPy)", "States Government as represented by the Administrator of # the National Aeronautics and", "handled yet\") return moments def compute_CDF(self, x_grid): ''' Returns numpy array of uniform", "as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a", "dimension (scalar), min/max to equal mean +/- 4stds self._dim = 1 self._mins =", "uniform random variable ''' import numpy as np from scipy.stats import uniform as", "min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random variable with provided minimum/maximum", "for defining a uniform random variable ''' import numpy as np from scipy.stats", "uniform pdf values at the points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value,", "to in writing, software # distributed under the License is distributed on an", "the United States under Title 17, U.S. Code. All Other Rights Reserved. #", "(SROMPy) platform is licensed # under the Apache License, Version 2.0 (the \"License\");", "not handled yet\") return moments def compute_CDF(self, x_grid): ''' Returns numpy array of", "class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random variable ''' def __init__(self,", "this # file except in compliance with the License. 
You may obtain a", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "variance of uniform random variable ''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns", "return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples from the", "#set dimension (scalar), min/max to equal mean +/- 4stds self._dim = 1 self._mins", "random variable with provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples. Caches", "RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random variable ''' def", "as np from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class", "# License for the specific language governing permissions and limitations # under the", "mean +/- 4stds self._dim = 1 self._mins = [min_val] self._maxs = [max_val] #cache", "self._dim = 1 self._mins = [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment", "#Rely on scipy.stats to return non-central moment for i in range(max_moment): self._moments[i] =", "''' Initialize the uniform (gaussian) random variable with provided minimum/maximum values. Implementation wraps", "# under the Apache License, Version 2.0 (the \"License\"); you may not use", "platform is licensed # under the Apache License, Version 2.0 (the \"License\"); you", "defining a uniform random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize", "min/max to equal mean +/- 4stds self._dim = 1 self._mins = [min_val] self._maxs", "Code. All Other Rights Reserved. 
# The Stochastic Reduced Order Models with Python", "in writing, software # distributed under the License is distributed on an \"AS", "scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class", "you may not use this # file except in compliance with the License.", "the uniform random variable. Returns numpy array of length 'sample_size' containing these samples", "min_val >= max_val: raise ValueError(\"Minimum value must be less than maximum value\") self._minimum_value", "from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random", "Python (SROMPy) platform is licensed # under the Apache License, Version 2.0 (the", "values at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid):", "array. ''' #TODO - calculate moments above max_moment on the fly & append", "specific language governing permissions and limitations # under the License. ''' Class for", "- min_val #set dimension (scalar), min/max to equal mean +/- 4stds self._dim =", "UniformRandomVariable(RandomVariable): ''' Class for defining a uniform random variable ''' def __init__(self, min_val=0.,", "''' Returns moments up to order 'max_order' in numpy array. ''' #TODO -", "sample_size): ''' Draws random samples from the uniform random variable. Returns numpy array", "scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array of uniform pdf", "self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array of inverse uniform CDF values", "self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central moment for i", "up to max_moment for speedup. 
''' if min_val >= max_val: raise ValueError(\"Minimum value", "ValueError(\"Minimum value must be less than maximum value\") self._minimum_value = min_val self._range_size =", "np from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable):", "raise NotImplementedError(\"Moment above max_moment not handled yet\") return moments def compute_CDF(self, x_grid): '''", "The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed # under", "scipy.stats to return non-central moment for i in range(max_moment): self._moments[i] = scipyuniform.moment(i+1, self._minimum_value,", "wraps scipy.stats.uniform to get statistics/samples. Caches moments up to max_moment for speedup. '''", "License for the specific language governing permissions and limitations # under the License.", "\"License\"); you may not use this # file except in compliance with the", "on the fly & append to stored if max_order <= self._max_moment: moments =", "and limitations # under the License. ''' Class for defining a uniform random", "the specific language governing permissions and limitations # under the License. ''' Class", "Aeronautics and Space Administration. No copyright is claimed in # the United States", "of # the National Aeronautics and Space Administration. No copyright is claimed in", "numpy array of uniform pdf values at the points contained in x_grid '''", "max_moment for speedup. ''' if min_val >= max_val: raise ValueError(\"Minimum value must be", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law", "= max_val - min_val #set dimension (scalar), min/max to equal mean +/- 4stds", "defining a uniform random variable ''' import numpy as np from scipy.stats import", "file except in compliance with the License. 
You may obtain a copy of", "numpy array of uniform CDF values at the points contained in x_grid '''", "samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate & store", "random variable ''' import numpy as np from scipy.stats import uniform as scipyuniform", "pdf values at the points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size)", "OF ANY KIND, either express or implied. See the # License for the", "4stds self._dim = 1 self._mins = [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment)", "and Space Administration. No copyright is claimed in # the United States under", "np.zeros((max_moment, 1)) #Rely on scipy.stats to return non-central moment for i in range(max_moment):", "uniform rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self,", "at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): '''", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "a uniform random variable ''' import numpy as np from scipy.stats import uniform", "the National Aeronautics and Space Administration. No copyright is claimed in # the", "Class for defining a uniform random variable ''' import numpy as np from", "''' Class for defining a uniform random variable ''' def __init__(self, min_val=0., max_val=0.,", "minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples. 
Caches moments up to max_moment", "compute_inv_CDF(self, x_grid): ''' Returns np array of inverse uniform CDF values at pts", "if min_val >= max_val: raise ValueError(\"Minimum value must be less than maximum value\")", "self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples from the uniform random", "- calculate moments above max_moment on the fly & append to stored if", "if max_order <= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not", "<gh_stars>0 # Copyright 2018 United States Government as represented by the Administrator of", "''' Calculate & store moments to retrieve more efficiently later ''' self._moments =", "obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by", "variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random", "#TODO - calculate moments above max_moment on the fly & append to stored", "containing these samples ''' #Use scipy uniform rv to return shifted/scaled samples automatically", "the points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid):", "'sample_size' containing these samples ''' #Use scipy uniform rv to return shifted/scaled samples", "on scipy.stats to return non-central moment for i in range(max_moment): self._moments[i] = scipyuniform.moment(i+1,", "get statistics/samples. Caches moments up to max_moment for speedup. ''' if min_val >=", "random variable ''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up to", "random samples from the uniform random variable. Returns numpy array of length 'sample_size'", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "Returns moments up to order 'max_order' in numpy array. ''' #TODO - calculate", "from the uniform random variable. 
Returns numpy array of length 'sample_size' containing these", "as represented by the Administrator of # the National Aeronautics and Space Administration.", "yet\") return moments def compute_CDF(self, x_grid): ''' Returns numpy array of uniform CDF", "store moments to retrieve more efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely", "ANY KIND, either express or implied. See the # License for the specific", "from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): '''", "#cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return self._dim def get_variance(self): '''", "uniform random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform", "the fly & append to stored if max_order <= self._max_moment: moments = self._moments[:max_order]", "by the Administrator of # the National Aeronautics and Space Administration. No copyright", "uniform CDF values at the points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value,", "self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled yet\") return moments def compute_CDF(self,", "self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return self._dim def get_variance(self): ''' Returns variance", "use this # file except in compliance with the License. 
You may obtain", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate & store moments to", "value must be less than maximum value\") self._minimum_value = min_val self._range_size = max_val", "samples ''' #Use scipy uniform rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value,", "moments above max_moment on the fly & append to stored if max_order <=", "Returns numpy array of uniform pdf values at the points contained in x_grid", "at the points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self,", "uniform CDF values at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def", "min_val self._range_size = max_val - min_val #set dimension (scalar), min/max to equal mean", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "''' Returns numpy array of uniform pdf values at the points contained in", "points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): '''", "self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up to order 'max_order' in numpy", "draw_random_sample(self, sample_size): ''' Draws random samples from the uniform random variable. Returns numpy", "rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment):", "Caches moments up to max_moment for speedup. ''' if min_val >= max_val: raise", "Administration. 
No copyright is claimed in # the United States under Title 17,", "See the # License for the specific language governing permissions and limitations #", "stored if max_order <= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment", "law or agreed to in writing, software # distributed under the License is", "''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array of", "import numpy as np from scipy.stats import uniform as scipyuniform from SROMPy.target.RandomVariable import", "''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up to order 'max_order'", "these samples ''' #Use scipy uniform rv to return shifted/scaled samples automatically return", "express or implied. See the # License for the specific language governing permissions", "compute_CDF(self, x_grid): ''' Returns numpy array of uniform CDF values at the points", "append to stored if max_order <= self._max_moment: moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment", "''' if min_val >= max_val: raise ValueError(\"Minimum value must be less than maximum", "# file except in compliance with the License. You may obtain a copy", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "calculate moments above max_moment on the fly & append to stored if max_order", "shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size) def generate_moments(self, max_moment): ''' Calculate &", "except in compliance with the License. 
You may obtain a copy of the", "get_variance(self): ''' Returns variance of uniform random variable ''' return self._std**2.0 def compute_moments(self,", "moments def compute_CDF(self, x_grid): ''' Returns numpy array of uniform CDF values at", "in compliance with the License. You may obtain a copy of the #", "moments = self._moments[:max_order] else: raise NotImplementedError(\"Moment above max_moment not handled yet\") return moments", "above max_moment on the fly & append to stored if max_order <= self._max_moment:", "Apache License, Version 2.0 (the \"License\"); you may not use this # file", "array of inverse uniform CDF values at pts in x_grid ''' return scipyuniform.ppf(x_grid,", "United States under Title 17, U.S. Code. All Other Rights Reserved. # The", "self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples from the uniform random variable.", "max_moment): ''' Calculate & store moments to retrieve more efficiently later ''' self._moments", "Order Models with Python (SROMPy) platform is licensed # under the Apache License,", "# License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to", "self._dim def get_variance(self): ''' Returns variance of uniform random variable ''' return self._std**2.0", "to retrieve more efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely on scipy.stats", "= 1 self._mins = [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment =", "of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or", "for the specific language governing permissions and limitations # under the License. '''", "is licensed # under the Apache License, Version 2.0 (the \"License\"); you may", "numpy array of length 'sample_size' containing these samples ''' #Use scipy uniform rv", "to get statistics/samples. Caches moments up to max_moment for speedup. 
''' if min_val", "''' Returns variance of uniform random variable ''' return self._std**2.0 def compute_moments(self, max_order):", "& append to stored if max_order <= self._max_moment: moments = self._moments[:max_order] else: raise", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "not use this # file except in compliance with the License. You may", "inverse uniform CDF values at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size)", "def get_variance(self): ''' Returns variance of uniform random variable ''' return self._std**2.0 def", "# the United States under Title 17, U.S. Code. All Other Rights Reserved.", "License. ''' Class for defining a uniform random variable ''' import numpy as", ">= max_val: raise ValueError(\"Minimum value must be less than maximum value\") self._minimum_value =", "''' Returns numpy array of uniform CDF values at the points contained in", "of length 'sample_size' containing these samples ''' #Use scipy uniform rv to return", "'max_order' in numpy array. ''' #TODO - calculate moments above max_moment on the", "equal mean +/- 4stds self._dim = 1 self._mins = [min_val] self._maxs = [max_val]", "return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up to order 'max_order' in", "may not use this # file except in compliance with the License. You", "permissions and limitations # under the License. ''' Class for defining a uniform", "the Administrator of # the National Aeronautics and Space Administration. No copyright is", "either express or implied. 
See the # License for the specific language governing", "= [min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self):", "self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array of uniform pdf values", "def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random variable with", "(scalar), min/max to equal mean +/- 4stds self._dim = 1 self._mins = [min_val]", "sample_size) def generate_moments(self, max_moment): ''' Calculate & store moments to retrieve more efficiently", "States under Title 17, U.S. Code. All Other Rights Reserved. # The Stochastic", "or implied. See the # License for the specific language governing permissions and", "of uniform pdf values at the points contained in x_grid ''' return scipyuniform.pdf(x_grid,", "uniform (gaussian) random variable with provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get", "scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples from the uniform", "Reduced Order Models with Python (SROMPy) platform is licensed # under the Apache", "at the points contained in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self,", "may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required", "in x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np", "__init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random variable with provided", "self._minimum_value = min_val self._range_size = max_val - min_val #set dimension (scalar), min/max to", "U.S. Code. All Other Rights Reserved. 
# The Stochastic Reduced Order Models with", "Returns variance of uniform random variable ''' return self._std**2.0 def compute_moments(self, max_order): '''", "less than maximum value\") self._minimum_value = min_val self._range_size = max_val - min_val #set", "with the License. You may obtain a copy of the # License at", "License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. #", "''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size): ''' Draws random samples from", "import uniform as scipyuniform from SROMPy.target.RandomVariable import RandomVariable class UniformRandomVariable(RandomVariable): ''' Class for", "for defining a uniform random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10): '''", "''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array of", "def compute_pdf(self, x_grid): ''' Returns numpy array of uniform pdf values at the", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "language governing permissions and limitations # under the License. ''' Class for defining", "x_grid): ''' Returns numpy array of uniform CDF values at the points contained", "moments to retrieve more efficiently later ''' self._moments = np.zeros((max_moment, 1)) #Rely on", "CDF values at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self,", "x_grid ''' return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size) def compute_inv_CDF(self, x_grid): ''' Returns np array", "to order 'max_order' in numpy array. ''' #TODO - calculate moments above max_moment", "def draw_random_sample(self, sample_size): ''' Draws random samples from the uniform random variable. 
Returns", "Government as represented by the Administrator of # the National Aeronautics and Space", "def compute_CDF(self, x_grid): ''' Returns numpy array of uniform CDF values at the", "x_grid): ''' Returns numpy array of uniform pdf values at the points contained", "else: raise NotImplementedError(\"Moment above max_moment not handled yet\") return moments def compute_CDF(self, x_grid):", "to max_moment for speedup. ''' if min_val >= max_val: raise ValueError(\"Minimum value must", "maximum value\") self._minimum_value = min_val self._range_size = max_val - min_val #set dimension (scalar),", "Title 17, U.S. Code. All Other Rights Reserved. # The Stochastic Reduced Order", "http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to in writing, software", "of inverse uniform CDF values at pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value,", "compute_moments(self, max_order): ''' Returns moments up to order 'max_order' in numpy array. '''", "Returns numpy array of uniform CDF values at the points contained in x_grid", "of uniform CDF values at the points contained in x_grid ''' return scipyuniform.cdf(x_grid,", "uniform random variable ''' return self._std**2.0 def compute_moments(self, max_order): ''' Returns moments up", "with provided minimum/maximum values. Implementation wraps scipy.stats.uniform to get statistics/samples. Caches moments up", "OR CONDITIONS OF ANY KIND, either express or implied. See the # License", "scipy.stats.uniform to get statistics/samples. Caches moments up to max_moment for speedup. ''' if", "value\") self._minimum_value = min_val self._range_size = max_val - min_val #set dimension (scalar), min/max", "the uniform (gaussian) random variable with provided minimum/maximum values. 
Implementation wraps scipy.stats.uniform to", "#Use scipy uniform rv to return shifted/scaled samples automatically return scipyuniform.rvs(self._minimum_value, self._range_size, sample_size)", "''' def __init__(self, min_val=0., max_val=0., max_moment=10): ''' Initialize the uniform (gaussian) random variable", "for speedup. ''' if min_val >= max_val: raise ValueError(\"Minimum value must be less", "return self._dim def get_variance(self): ''' Returns variance of uniform random variable ''' return", "# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed #", "pts in x_grid ''' return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size) def compute_pdf(self, x_grid): ''' Returns", "the points contained in x_grid ''' return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size) def draw_random_sample(self, sample_size):", "Class for defining a uniform random variable ''' def __init__(self, min_val=0., max_val=0., max_moment=10):", "[min_val] self._maxs = [max_val] #cache moments self.generate_moments(max_moment) self._max_moment = max_moment def get_dim(self): return", "self._range_size) def compute_pdf(self, x_grid): ''' Returns numpy array of uniform pdf values at" ]
[ "assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions in summary # print(mdf.summary()) def", "v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf1", "sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se =", "np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog =", "ii in idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family =", "not agree exactly. \"\"\" from __future__ import print_function from statsmodels.compat import lrange import", "= coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\",", "= 300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] <", "assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None],", "group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1)", "se1, decimal=5) # Test with global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md", "correlation estimation differ among implementations and the results will not agree exactly. \"\"\"", "ve = Exchangeable() md = GEE(endog, exog, group, None, family, ve) mdf =", "does this test fail? 
def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2", "0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self):", "+ X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi)", "[[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n", "+ X2 + X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params,", "= md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group =", "0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable()", "CSV file with the following format: Column 0: Group indicator Column 1: endog", "= np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive() md", "family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test)", "= [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v", "vs = Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2", "exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1))", "= Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], 
[-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481,", "np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family = Binomial() Y", "If `icept` is True, an intercept is prepended to the exog variables. \"\"\"", "se, decimal=6) ne = Nested() md = GEE(endog, exog, group, None, family, ne,", "paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,", "= Z[,6] X5 = Z[,7] mi = gee(Y ~ X1 + X2 +", "size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md =", "'results', fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog = Z[:,2:] if", "np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize))", "= np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self):", "= np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog =", "me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8,", "= md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\"", "cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params,", "an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713,", "np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi =", "enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params,", "md = 
GEE(endog, exog, group, T, family, v) mdf = md.fit() if id(v)", "cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D = np.concatenate((endog[:,None], group[:,None],", "Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog", "(not an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821,", "group_n = np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog, group, None, family,", "= load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog, group, None, family, ve)", "variable Columns 2-end: exog variables If `icept` is True, an intercept is prepended", "exchangeable correlation structures. For other correlation structures, the details of the correlation estimation", "= 100 ar_param = 0.5 k = 3 ga = Gaussian() for gsize", "id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\")", "= x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog)", "mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self):", "GEE.from_formula(\"y ~ age + trt + base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 =", "errors derived from the naive covariance estimate. 
\"\"\" vs = Independence() family =", "gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715,", "X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family", "groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None,", "GLM.from_formula(\"y ~ age + trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params,", "group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog, group,", "the correlation estimation differ among implementations and the results will not agree exactly.", "From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se =", "corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see", "endog = Z[:,1] exog = Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1)", "maxit=100) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei =", "ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param ** ix", "test_compare_poisson(self): vs = Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100)", "np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog", "decimal=10) def 
test_compare_poisson(self): vs = Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1", "vs = Independence() family = Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2", "+ X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi)", "= np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\", "np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12] =", "= summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\")", "0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family =", "0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3) endog,", "+ X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u", "0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\"", "Independence() va = Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788,", "+ X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme", "cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial() endog, exog, groups", "numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian,", "+ X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe =", 
"[-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)):", "tol=1e-8, maxit=100) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see", "None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6)", "GEE(endog, exog, group, None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE", "se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md =", "scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see =", "md = GEE(endog, exog, group, None, family, dp) mdf1 = md.fit() # From", "mdf1 = md1.fit() # Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM", "= paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see)", "Autoregressive, Nested) import pandas as pd import statsmodels.formula.api as sm def load_data(fname, icept=True):", "corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei", "np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\":", "test_default_time(self): \"\"\" Check that the time defaults work correctly. 
\"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\")", "0.5] lpr = np.dot(exog, beta) prob = 1 / (1 + np.exp(-lpr)) endog", "exog, groups, None, family, v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not", "= load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family,", "[[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() endog,exog,group", "cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe,", "see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei,", "+ X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params,", "= Independence() ve = Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R =", "X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(),", "= os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind = Independence()", "1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(),", "= Independence() md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1 =", "D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j],", "GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf =", "Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 +", "= read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2", "mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf1 = 
np.r_[0.44944752,", "assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y =", "from the naive covariance estimate. \"\"\" vs = Independence() family = Gaussian() Y", "errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups =", "mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family", "decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this test fail? def t_est_missing(self):", "groups = [] for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval =", "assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable()", "= Binomial() va = Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1", "0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md = GEE(endog, exog, group,", "for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id =", "the following format: Column 0: Group indicator Column 1: endog variable Columns 2-end:", "in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family, v) mdf = md.fit()", "np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md", "ve = Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004,", "-0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 
0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)):", "= GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1 = md.fit() # From", "+ X3 + X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf =", "= np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups =", "md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups, family=family,", "X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10)", "[0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864,", "cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752,", "cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family", "0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0,", "idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() va", "= np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test", "Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3]", "cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params,", "md = GEE(endog, exog, group, None, family, ne, 
dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) #", "= Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se", "Independence() md1 = GEE.from_formula(\"y ~ age + trt + base\", data, groups=data[\"subject\"], cov_struct=ind,", "1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6)", "GEE(endog, exog, group, T, family, v) mdf = md.fit() if id(v) != id(va):", "= [[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi =", "paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se =", "Z[,3] X2 = Z[,4] X3 = Z[,5] X4 = Z[,6] X5 = Z[,7]", "scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei =", "family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable() #", "data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO:", "~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\",", "enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family, v) mdf = md.fit() if", "def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname)", "= [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439,", "# From statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943]", "statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, 
Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures", "3 ga = Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:]", "odds ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family,", "smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4],", "endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n", "assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md = GEE(endog, exog, group, None, family,", "id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\")", "(Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import statsmodels.formula.api as sm", "np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D", "Column 1: endog variable Columns 2-end: exog variables If `icept` is True, an", "# This is in the release announcement for version 0.6. def test_poisson_epil(self): cur_dir", "groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\":", "# Check for run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true =", "\"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\",", "= Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] X4 =", "version 0.6. 
def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data", "np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L =", "corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\")", "# Time values for the autoregressive model T = np.zeros(len(endog)) idx = set(group)", "1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve", "family, v) mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(),", "library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 =", "+ X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi =", "fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind =", "mdf.resid) def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id", "= np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\":", "= paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa)", "= GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit()", "= Independence() family = Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 =", "vi = Independence() va = Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893,", "-1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in 
enumerate((vi,ve,va)):", "exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time", "import GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age + trt", "= GEE(endog, exog, group, T, family, v) mdf = md.fit() if id(v) !=", "with independence correlation v = Independence() md = GEE(endog, exog, groups, None, family,", "se[j], decimal=6) # Check for run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self):", "= lrange(len(jj)) family = Binomial() ve = Exchangeable() vi = Independence() va =", "/ \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family =", "X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0, 4,", "Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2 =", "sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi", "exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group", "D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in", "= 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta", "-0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019,", "Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100)", "decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds ratio dependence v =", "= md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian()", "__future__ import 
print_function from statsmodels.compat import lrange import numpy as np import os", "= md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions", "mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self):", "0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]]", "+ X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 +", "me = gee(Y ~ X1 + X2 + X3 + X4 + X5,", "family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\")", "group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp", "and the results will not agree exactly. 
\"\"\" from __future__ import print_function from", "= 0.5 k = 3 ga = Gaussian() for gsize in 1,2,3: ix", "read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2 =", "with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns =", "se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md", "# From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se", "= Independence() ve = Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003],", "mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10)", "ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma", "0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z =", "-1, 0.5] lpr = np.dot(exog, beta) prob = 1 / (1 + np.exp(-lpr))", "md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee)", "endog,exog,group class TestGEE(object): def test_margins(self): n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0]", "= pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md =", "me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") 
sme", "decimal=10) def test_compare_logit(self): vs = Independence() family = Binomial() Y = 1*(np.random.normal(size=100) <", "def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677,", "sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family =", "groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) #", "assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for comparing results: library(gee) Z", "values for the autoregressive model T = np.zeros(len(endog)) idx = set(group) for ii", "Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]]", "assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D = np.concatenate((endog[:,None],", "test_ordinal(self): family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\")", "1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5 k", "[0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md = GEE(endog, exog, group, None,", "= paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1 +", "exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n =", "icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family, v) 
md.setup_ordinal()", "exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups,", "~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\")", "se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md", "data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params", "= sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def", "id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\")", "paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 + X2", "X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5))", "should generally agree with the R GEE implementation for the independence and exchangeable", "cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y ~ X1", "set(group) for ii in idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj))", "decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]),", "ii) T[jj] = lrange(len(jj)) family = Binomial() ve = Exchangeable() vi = Independence()", "j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family, v) mdf =", "in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None,", "np.random.randint(0, 4, size=100) D = 
pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3})", "= np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\"", "an independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606,", "= GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs)", "mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This is in the", "= Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~ age + trt +", "md = GEE(endog, exog, group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j],", "= GEE.from_formula(\"y ~ age + trt + base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1", "exog, group, None, family, dp) mdf1 = md.fit() # From statsmodels.GEE (not an", "for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors =", "[] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence()", "= np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne =", "x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups", "Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix)", "md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\")", "= GEE(endog, exog, group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10)", "dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def 
test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve =", "np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5] lpr", "gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma)", "D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md", "= GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6)", "= [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian()", "cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds ratio dependence v", "assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family = Binomial() Y =", "-0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574,", "md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318,", "family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md", "exog, group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j],", "0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def", "[0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group,", "= Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1,", "= GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D.loc[:,\"Id\"], 
family=family, cov_struct=v)", "= ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family =", "= np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors)", "-1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]]", "marg = GEEMargins(mdf, ()) marg.summary() # This is in the release announcement for", "assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def", "axis=0) ar = Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct = ar)", "decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs", "+ base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6)", "np.random.seed(342837482) num_group = 100 ar_param = 0.5 k = 3 ga = Gaussian()", "mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2]", "cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family", "ex = Exchangeable() md = GEE(endog, exog, group, time, fa, ex) mdf =", "= 
Z[,4] X3 = Z[,5] mi = gee(Y ~ X1 + X2 +", "X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D,", "v = Independence() md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1", "Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio,", "= np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1,", "np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] =", "0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md = GEE(endog, exog,", "\\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family = Binomial()", "implementation for the independence and exchangeable correlation structures. For other correlation structures, the", "GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import statsmodels.formula.api as sm def load_data(fname,", "mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549,", "0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0, 1,", "= GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog,", "pd.read_csv(fname) fam = Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~ age +", "differ among implementations and the results will not agree exactly. \"\"\" from __future__", "family = Binomial() ve = Exchangeable() vi = Independence() va = Autoregressive() #", "mdf2.scale, decimal=6) # TODO: why does this test fail? 
def t_est_missing(self): Y =", "for GEE External comparisons are to R. The statmodels GEE implementation should generally", "decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for comparing results: library(gee)", "decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D =", "External comparisons are to R. The statmodels GEE implementation should generally agree with", "group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0, 1, -1,", "D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y", "md = GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params,", "~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u", "for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family, v) mdf", "n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2]", "Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] X4 = Z[,6]", "assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1)", "X3}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups,", "v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian()", "gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,", "= 3 ga = Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None] -", "np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct =", "0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve =", "global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None,", "load_data(\"gee_ordinal_1.csv\", icept=False) v = 
GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family, v)", "= gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma =", "params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group", "exog, groups, family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1])", "md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1 = md.fit() #", "GEE(endog, exog, group, time, fa, ex) mdf = md.fit() marg = GEEMargins(mdf, ())", "the independence and exchangeable correlation structures. For other correlation structures, the details of", "== ii) T[jj] = lrange(len(jj)) family = Binomial() ve = Exchangeable() vi =", "= GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf = md.fit() cf =", "test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog,", "family = Binomial() va = Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va)", "np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] =", "Independence() ve = Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256],", "X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan", "= paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3 +", "Independence() family = Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2", "summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf", "pd import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" 
Load a data set", "+ X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa =", "family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test", "import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial,", "= Exchangeable() md = GEE(endog, exog, group, None, family, ve) mdf = md.fit()", "test_margins(self): n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] =", "[0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for", "set from the results directory. 
The data set should be a CSV file", "se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5)", "an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582,", "+ X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi", "= np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, beta)", "# print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should agree exactly", "GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~", "sml = sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10)", "data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should agree with GLM", "ve = Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for", "= Nested() md = GEE(endog, exog, group, None, family, ne, dep_data=group_n) mdf2 =", "[] groups = [] for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval", "= np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\":", "statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age +", "cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md = GEE(endog, exog, group,", "mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R", "X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1,", "= Z[:,1] exog = Z[:,2:] if icept: exog = 
np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return", "= [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group =", "Id = Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] X4", "def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 =", "[1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5 k = 3", "None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10)", "GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params,", "with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns =", "0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds ratio", "[[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213,", "vi = Independence() ve = Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178,", "family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check", "X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y", "= gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id,", 
"group, family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog, group, time=T, family=family,", "np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n = 300 exog", "D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2 +", "exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n = 300 exog =", "statmodels GEE implementation should generally agree with the R GEE implementation for the", "test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam", "endog = np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive()", "X3 = Z[,5] X4 = Z[,6] X5 = Z[,7] mi = gee(Y ~", "= md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This is in the release", "cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model", "estimates and standard errors derived from the naive covariance estimate. 
\"\"\" vs =", "Column 0: Group indicator Column 1: endog variable Columns 2-end: exog variables If", "Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3]", "+ X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y", "md.fit() # From statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073 , 1.00467426,", "= np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3})", "Z[,4] X3 = Z[,5] X4 = Z[,6] X5 = Z[,7] mi = gee(Y", "decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should agree", "Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas", "variables. \"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group =", "def test_compare_poisson(self): vs = Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 =", "np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\":", "family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params)", "Z[:,0] endog = Z[:,1] exog = Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog),", "Test with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns", "# Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D)", "id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D =", "np.abs(ix) cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog = [] exog", "np.random.normal(size=100) 
X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y,", "estimate. \"\"\" vs = Independence() family = Gaussian() Y = np.random.normal(size=100) X1 =", "X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 +", "cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019]", "see = paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 + X2 + X3,", "import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from", "correctly. \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T", "T = np.zeros(len(endog)) idx = set(group) for ii in idx: jj = np.flatnonzero(group", "paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\"", "vi = Independence() ve = Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R", "se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should", "cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079],", "()) marg.summary() # This is in the release announcement for version 0.6. 
def", "cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") #", "From statsmodels.GEE (not an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1", "details of the correlation estimation differ among implementations and the results will not", "Independence() ve = Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467,", "time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def", "j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3 +", "test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1]", "a data set from the results directory. The data set should be a", "import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import", "= md.fit() ols = sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values,", "from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age", "pandas as pd import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" Load a", "age + trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6)", "= load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T = np.zeros(len(endog)) idx", "directory. 
The data set should be a CSV file with the following format:", "v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf2", "= np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\":", "np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog, exog, groups,", "cmat_r = np.linalg.cholesky(cmat) endog = [] exog = [] groups = [] for", "X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(),", "Load a data set from the results directory. The data set should be", "= [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for", "cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\")", "se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues,", "Y[0] = np.nan Y[5:7] = np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y,", "~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u", "with global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups,", "exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for", "will not agree exactly. 
\"\"\" from __future__ import print_function from statsmodels.compat import lrange", "= sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se", "as pd import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" Load a data", "idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() ve", "= np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\"", "class TestGEE(object): def test_margins(self): n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0] =", "test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1]", "gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme", "GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml", "X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs =", "groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog,", "= np.zeros(len(endog)) idx = set(group) for ii in idx: jj = np.flatnonzero(group ==", "= np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi", "np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241,", "assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions in summary", "Independence() family = Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100)", "= np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y,", "ix cmat_r = 
np.linalg.cholesky(cmat) endog = [] exog = [] groups = []", "run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948]", "sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self):", "exog variables If `icept` is True, an intercept is prepended to the exog", "correlation structures, the details of the correlation estimation differ among implementations and the", "\"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None,", "sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\",", "ar_param = 0.5 k = 3 ga = Gaussian() for gsize in 1,2,3:", "1 exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n,", "mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6)", "mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check", "ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence() family = Binomial() Y = 1*(np.random.normal(size=100)", "Group indicator Column 1: endog variable Columns 2-end: exog variables If `icept` is", "= md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319, 1.02183688,", "GEE implementation should generally agree with the R GEE implementation for the independence", "import print_function from statsmodels.compat import lrange import numpy as np import os from", "from statsmodels.compat import lrange import numpy as np import os from numpy.testing import", "\"\"\" Check that the time defaults work correctly. 
\"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") #", "exog, group, time, fa, ex) mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary()", "paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,", "= paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 +", "= Z[,2] Id = Z[,1] X1 = Z[,3] X2 = Z[,4] X3 =", "announcement for version 0.6. def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\",", "Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd", "the R GEE implementation for the independence and exchangeable correlation structures. For other", "among implementations and the results will not agree exactly. \"\"\" from __future__ import", "i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md =", "\"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1", "The data set should be a CSV file with the following format: Column", "GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10)", "beta) prob = 1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob)", "time defaults work correctly. 
\"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the", "Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups", "groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False)", "~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme =", "= np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group, None, family,", "= np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups =", "ix = np.abs(ix) cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog =", "in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2", "# print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319,", "= Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object):", "None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6)", "mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z =", "np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\":", "+ X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\": import nose nose.runmodule(argv=[__file__,'-vvs','-x','--pdb',", "u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf =", "se, decimal=10) naive_tvalues = mdf.params / \\ 
np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self):", "numpy as np import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE,", "in idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial()", "~ X1 + X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit()", "= Independence() family = Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100)", "= load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n =", "groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12] = np.nan", "md = GEE(endog, exog, group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j],", "dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family, v) md.setup_nominal()", "paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2", "variables If `icept` is True, an intercept is prepended to the exog variables.", "groups, family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def", "an intercept is prepended to the exog variables. 
\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z", "for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3\",", "X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(),", "family = Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 =", "X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi)", "= GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf =", "Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable() # From R", "md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions in", "assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds ratio dependence v = GlobalOddsRatio(\"nominal\")", "cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params,", "family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values,", "cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog = [] exog =", "= lrange(len(jj)) family = Binomial() va = Autoregressive() md1 = GEE(endog, exog, group,", "np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence()", "= [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 
0.192052750030501,0.173174779369249],", "family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea", "time, fa, ex) mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This", "Z[,6] X5 = Z[,7] mi = gee(Y ~ X1 + X2 + X3", "should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2", "+ X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 +", "def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i", "decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) #", "= 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex = Exchangeable() md = GEE(endog,", "with OLS for parameter estimates and standard errors derived from the naive covariance", "+ trt + base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients", "X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u", "md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog,", "= np.abs(ix) cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog = []", "= mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs = Independence()", "= [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp =", "X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols =", "= np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100)", "exactly with OLS for parameter estimates and standard errors derived from the naive", "X1 + X2 + X3\", D, None, 
groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit()", "0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445,", "independence correlation v = Independence() md = GEE(endog, exog, groups, None, family, v)", "(95,4) def test_default_time(self): \"\"\" Check that the time defaults work correctly. \"\"\" endog,exog,group", "indicator Column 1: endog variable Columns 2-end: exog variables If `icept` is True,", "< 0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0,", "with the R GEE implementation for the independence and exchangeable correlation structures. For", "enumerate((vi,ve)): md = GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf = md.fit()", "for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md", "cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog, groups", "def test_default_time(self): \"\"\" Check that the time defaults work correctly. 
\"\"\" endog,exog,group =", "[] for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors", "mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas", "= GLM.from_formula(\"y ~ age + trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\")", "sm def load_data(fname, icept=True): \"\"\" Load a data set from the results directory.", "= md1.fit() md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit()", "library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 =", "Exchangeable() vi = Independence() va = Autoregressive() # From R gee cf =", "endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() # From R gee", "~ age + trt + base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit()", "sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3,", "assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None],", "mdf = md.fit() ols = sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit()", "= np.dot(exog, beta) prob = 1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n)", "assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve", "= np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse,", "= Independence() md1 = GEE.from_formula(\"y ~ age + trt + base\", data, groups=data[\"subject\"],", "[0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for 
j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None,", "= ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog = [] exog = []", "comparisons are to R. The statmodels GEE implementation should generally agree with the", "test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991]", "md.fit() # From statsmodels.GEE (not an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064,", "GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf", "exog = np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog, exog, groups, family=ga,", "mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8,", "R GEE implementation for the independence and exchangeable correlation structures. For other correlation", "test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should agree exactly with OLS for", "= np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param ** ix cmat_r", "0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md =", "assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\",", "family, v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not an independent test)", "np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\":", "pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y", "/ (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex", "corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei =", "u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = 
paste(u[,4], collapse=\",\") ma =", "cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog", "for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1", "+ X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u = coefficients(smi)", "= np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L = np.r_[[[0, 0,", "0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5)", "trt + base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should", "= GEE(endog, exog, group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5)", "def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should agree exactly with OLS", "is True, an intercept is prepended to the exog variables. 
\"\"\" cur_dir =", "from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import", "From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284],", "naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10) def test_compare_logit(self): vs =", "return endog,exog,group class TestGEE(object): def test_margins(self): n = 300 exog = np.random.normal(size=(n, 4))", "= np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12] = np.nan D", "structures, the details of the correlation estimation differ among implementations and the results", "= GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1])", "base\", data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should agree with", "va = Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299],", "= coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\",", "groups, None, family, v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146,", "ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family, v)", "pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for", "constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group", "\"\"\" family = 
Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable()", "np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5] lpr =", "Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def", "the details of the correlation estimation differ among implementations and the results will", "0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md = GEE(endog, exog, group, None, family,", "0, 0, 1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog,", "= np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5]", "autoregressive model T = np.zeros(len(endog)) idx = set(group) for ii in idx: jj", "fa = Binomial() ex = Exchangeable() md = GEE(endog, exog, group, time, fa,", "statsmodels.GEE (not an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 =", "= read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2", "\"X2\": X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 + X2", "md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test", "md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se =", "lpr = np.dot(exog, beta) prob = 1 / (1 + np.exp(-lpr)) endog =", "TODO: why does this test fail? 
def t_est_missing(self): Y = np.random.normal(size=100) X1 =", "np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex = Exchangeable() md", "= [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi =", "md = GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf = md.fit() cf", "[0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family,", "decimal=6) def test_ordinal(self): family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v", "family, v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409,", "md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100)))", "0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n,", "for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family, v) mdf", "delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog = Z[:,2:] if icept: exog", "se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D", "!= id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D", "Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import statsmodels.formula.api as sm def", "md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131,", "For other correlation structures, the details of the correlation estimation differ among implementations", "Id = Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] mi", "collapse=\",\") ma = 
gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\")", "u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf =", "time = np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5] lpr = np.dot(exog,", "group = Z[:,0] endog = Z[:,1] exog = Z[:,2:] if icept: exog =", "= gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi =", "exog, group, None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not", "load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T = np.zeros(len(endog)) idx =", "endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T = np.zeros(len(endog))", "test_nominal(self): family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with", "an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842,", "[\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md =", "0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482)", "release announcement for version 0.6. 
def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir,", "mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\",", "family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va)", "id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1],", "icept=False) # Test with independence correlation v = Independence() md = GEE(endog, exog,", "GEEMargins(mdf, ()) marg.summary() # This is in the release announcement for version 0.6.", "mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073", "= [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for j,v in", "exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog, group, time=T,", "gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi", "np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog = Z[:,2:]", "GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(),", "collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea)", "decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for", "= Z[,5] mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,", "X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u = coefficients(smi) cfi", "== ii) T[jj] = lrange(len(jj)) family = Binomial() va = Autoregressive() md1 =", "D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() 
assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j],", "1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281]", "decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md = GEE(endog, exog, group, None,", "family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1],", "X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] mi = gee(Y ~", "md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that the", "independence correlation should agree exactly with OLS for parameter estimates and standard errors", "= GEE(endog, exog, group, time, fa, ex) mdf = md.fit() marg = GEEMargins(mdf,", "X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1", "group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10)", "D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j],", "\"results\", \"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind = Independence() md1 =", "x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval +", "GEE External comparisons are to R. 
The statmodels GEE implementation should generally agree", "0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]]", "= load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable() # From R gee cf", "exog, group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog -", "\"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1", "[[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md = GEE(endog, exog,", "\"\"\" R code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y =", "group, None, family, dp) mdf1 = md.fit() # From statsmodels.GEE (not an independent", "statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 =", "np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2,", "GEE.from_formula(\"Y ~ X1 + X2 + X3 + X4 + X5\", D, None,", "-0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for", "from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from", "test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 
1.05826987,", "j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D,", "family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid)", "Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3", "following format: Column 0: Group indicator Column 1: endog variable Columns 2-end: exog", "decimal=6) ne = Nested() md = GEE(endog, exog, group, None, family, ne, dep_data=group_n)", "= load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v = Independence() md =", "sml = sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10)", "for run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896,", "~ X1 + X2 + X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf =", "gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me)", "Z[:,1] exog = Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group", "coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y ~", "0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5 k = 3 ga =", "[0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in", "-0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(),", "statsmodels.genmod.families import Gaussian, Binomial, Poisson from 
statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested)", "exog, group, None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def", "= GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0,", "v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf2", "the autoregressive model T = np.zeros(len(endog)) idx = set(group) for ii in idx:", "X3 = Z[,5] mi = gee(Y ~ X1 + X2 + X3, id=Id,", "md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n", "def test_compare_logit(self): vs = Independence() family = Binomial() Y = 1*(np.random.normal(size=100) < 0)", "(not an independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se =", "marg.summary() # This is in the release announcement for version 0.6. def test_poisson_epil(self):", "= md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\")", "from the results directory. 
The data set should be a CSV file with", "results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1", "\"\"\" Gaussian GEE with independence correlation should agree exactly with OLS for parameter", "\"\"\" vs = Independence() family = Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100)", "None, family, v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not an independent", "+ trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale,", "groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2,", "-2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se,", "corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see =", "decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial() endog, exog, groups =", "from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age + trt + base\",", "summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf", "ne = Nested() md = GEE(endog, exog, group, None, family, ne, dep_data=group_n) mdf2", "for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group, None, family, v, constraint=(L,R))", "= paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se", "= np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)): md", "= np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2,", "= coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y", "# Test with formulas D = np.concatenate((endog[:,None], 
group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D)", "[[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence()", "None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent", "md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D.loc[:,\"Id\"], family=family,", "collapse=\",\") me = gee(Y ~ X1 + X2 + X3 + X4 +", "= np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0) group =", "= np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\":", "exog, group, T, family, v) mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params,", "v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172,", "# Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import", "= GEEMargins(mdf, ()) marg.summary() # This is in the release announcement for version", "np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6)", "os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1]", "X2 + X3 + X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf", "X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3,", "for the autoregressive model T = np.zeros(len(endog)) idx = set(group) for ii in", "T, family, v) mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6)", "np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param ** ix cmat_r =", "cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, 
cfe)", "implementations and the results will not agree exactly. \"\"\" from __future__ import print_function", "# From statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004,", "T[jj] = lrange(len(jj)) family = Binomial() va = Autoregressive() md1 = GEE(endog, exog,", "(not an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057,", "= md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D", "0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5)", "+ X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme)", "X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u = coefficients(smi) cfi =", "md.fit() ols = sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params,", "exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(),", "family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\")", "se, decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False)", "[[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100", ", 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6)", "group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) 
assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6)", "family=fam) mdf1 = md1.fit() # Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import", "= Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438,", "k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 +", "= 1 exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time =", "0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T,", "cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842,", "\"X1\": X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 + X2", "[[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group =", "None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent", "= Independence() family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 =", "exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog", "Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5)", "np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() ve = 
Exchangeable() vi", "np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)): md =", "decimal=10) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D =", "the results will not agree exactly. \"\"\" from __future__ import print_function from statsmodels.compat", "Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups", "~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\": import", "that the time defaults work correctly. \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values", "params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md", "Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3]", "0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog, group, T, family, v)", "standard errors derived from the naive covariance estimate. 
\"\"\" vs = Independence() family", "# Test with global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog,", "jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() va =", "TestGEE(object): def test_margins(self): n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1", "cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989]", "X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1", "se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def", "sei, see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve", "= Binomial() ve = Exchangeable() vi = Independence() va = Autoregressive() # From", "# From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se", "1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group,", "collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\",", "Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for j,v in", "groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should agree with GLM from", "L = np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)):", "lrange(len(jj)) family = Binomial() ve = Exchangeable() vi = Independence() va = Autoregressive()", "R code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2]", "= os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam = 
Poisson()", "Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]]", "+ X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~", "X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme =", "[[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the", "id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1],", "covariance estimate. \"\"\" vs = Independence() family = Gaussian() Y = np.random.normal(size=100) X1", "1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100),", "== (95,4) def test_default_time(self): \"\"\" Check that the time defaults work correctly. \"\"\"", "md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf2 =", "k = 3 ga = Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None]", "def test_post_estimation(self): family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md =", "import numpy as np import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import", "X1 + X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE)", "= np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params,", "functions for GEE External comparisons are to R. 
The statmodels GEE implementation should", "+ X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u =", "decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions in summary # print(mdf.summary())", "= np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n = 300", "correlation structures. For other correlation structures, the details of the correlation estimation differ", "< 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups =", "collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 + X2 +", "group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog, group, None,", "should agree exactly with OLS for parameter estimates and standard errors derived from", "data = pd.read_csv(fname) fam = Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~", "+ np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex = Exchangeable()", "cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for", "independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629", "None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~ X1 +", "assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson", "= np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), 
np.ones(5)) D", "in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family, v) mdf = md.fit()", "j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family, v) mdf =", "= [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md =", "X2 = Z[,4] X3 = Z[,5] X4 = Z[,6] X5 = Z[,7] mi", "np import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial)", "= Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md =", "D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2 +", "assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z", "independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421,", "cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() endog,exog,group =", "other correlation structures, the details of the correlation estimation differ among implementations and", "~ X1 + X2 + X3 + X4 + X5\", D, None, groups=D.loc[:,\"Id\"],", "sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\"", "= Exchangeable() md = GEE(endog, exog, group, time, fa, ex) mdf = md.fit()", "groups}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D[\"groups\"],", "mdf1 = md1.fit() md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 =", "defaults work correctly. 
\"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive", "gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597],", "generally agree with the R GEE implementation for the independence and exchangeable correlation", "+ X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5)", "X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2", "np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se,", "cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE)", "0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial()", "mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\", "def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog =", "np.nan Y[5:7] = np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1,", "range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 +", "md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This is in the release announcement", "mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for 
comparing results:", "se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne", "[] exog = [] groups = [] for i in range(num_group): x =", "print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927],", "+ X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence()", "corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\")", "decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence", "fa, ex) mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This is", "cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time", "-0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5)", "mi = gee(Y ~ X1 + X2 + X3 + X4 + X5,", "print_function from statsmodels.compat import lrange import numpy as np import os from numpy.testing", "= 1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa =", "= md1.fit() # Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from", "groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def", "read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2 =", "sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T", "X1 = Z[,3] X2 = Z[,4] X3 
= Z[,5] X4 = Z[,6] X5", "range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog,", "if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self):", "of the correlation estimation differ among implementations and the results will not agree", "[[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117,", "collapse=\",\") me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\")", "is in the release announcement for version 0.6. def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__))", "-0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1,", "results directory. The data set should be a CSV file with the following", "for the independence and exchangeable correlation structures. For other correlation structures, the details", "endog = [] exog = [] groups = [] for i in range(num_group):", "np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" %", "1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups", "independence and exchangeable correlation structures. 
For other correlation structures, the details of the", "in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true =", "cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time exceptions in summary #", "X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~", "[-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v", "1, -1, 0.5] lpr = np.dot(exog, beta) prob = 1 / (1 +", "family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why", "np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested()", "v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf", "groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~ X1 + X2", "= Z[,5] X4 = Z[,6] X5 = Z[,7] mi = gee(Y ~ X1", "in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize))", "4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md", "statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" Load a data set from the", "Test with independence correlation v = Independence() md = GEE(endog, exog, groups, None,", "family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1,", "The statmodels 
GEE implementation should generally agree with the R GEE implementation for", "j,v in enumerate((vi, ve)): md = GEE(endog, exog, group, None, family, v) mdf", "= Exchangeable() vi = Independence() va = Autoregressive() # From R gee cf", "test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog,", "family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence", "None, family, v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666,", "X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe = paste(u[,1],", "for j,v in enumerate((vi, ve)): md = GEE(endog, exog, group, None, family, v)", "se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2]", "Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families", "for parameter estimates and standard errors derived from the naive covariance estimate. \"\"\"", "GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE", "= Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() # From", "= load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() # From R gee cf", "0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family =", "Test functions for GEE External comparisons are to R. 
The statmodels GEE implementation", "se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1", "import families md2 = GLM.from_formula(\"y ~ age + trt + base\", data, family=families.Poisson())", "-1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf,", "enumerate((vi, ve)): md = GEE(endog, exog, group, None, family, v) mdf = md.fit()", "Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog,", "(not an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616,", "GEE(endog, exog, group, None, family, dp) mdf1 = md.fit() # From statsmodels.GEE (not", "cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n =", "summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me", "X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95)", "\"\"\" from __future__ import print_function from statsmodels.compat import lrange import numpy as np", "ve)): md = GEE(endog, exog, group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params,", "= read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2", "and exchangeable correlation structures. For other correlation structures, the details of the correlation", "= md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se", "endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v =", "GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf", "the exog variables. 
\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\")", "def test_logistic(self): \"\"\" R code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE)", "X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse,", "decimal=5) # Test with global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md =", "= np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0]", "maxit=100) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see =", "se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5)", "X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u = coefficients(sme) cfe", "icept=True): \"\"\" Load a data set from the results directory. The data set", "pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1", "decimal=6) # TODO: why does this test fail? 
def t_est_missing(self): Y = np.random.normal(size=100)", "+ X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\": import nose nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'], exit=False)", "prob = 1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa", "np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive() md =", "+ X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6)", "= md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) #", "cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2", "axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n = 300 exog = np.random.normal(size=(n,", "Nested() md = GEE(endog, exog, group, None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params)", "= [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Poisson()", "family = Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3", "np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12] = np.nan D =", "= Z[,7] mi = gee(Y ~ X1 + X2 + X3 + X4", "to R. The statmodels GEE implementation should generally agree with the R GEE", "collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi, cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family", "fail? 
def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3", "X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa = paste(u[,1],", "group, None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self):", "collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 +", "= gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100)", "[[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md = GEE(endog,", "see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model", "read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2 =", "~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml =", "implementation should generally agree with the R GEE implementation for the independence and", "why does this test fail? def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100)", "= md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549, 0.42278345,", "prepended to the exog variables. 
\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results',", "assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation", "i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r,", "range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval", "groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False)", "= Z[:,0] endog = Z[:,1] exog = Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)),", "= pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~", "errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar", "t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100)", "+ errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0)", "\"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind = Independence() md1 = GEE.from_formula(\"y", "Binomial() ve = Exchangeable() vi = Independence() va = Autoregressive() # From R", "assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this test fail? 
def t_est_missing(self): Y", "None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def", "expval = x.sum(1) errors = np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog =", "decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id", "os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind = Independence() md1", "assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE", "= set(group) for ii in idx: jj = np.flatnonzero(group == ii) T[jj] =", "is prepended to the exog variables. \"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir,", "~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\",", "None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family", "X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u =", "= GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From", "[[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog,", "cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with", "GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params)", "age + trt + base\", data, 
groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() #", "+ X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi", "dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787, 1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055],", "model T = np.zeros(len(endog)) idx = set(group) for ii in idx: jj =", "Time values for the autoregressive model T = np.zeros(len(endog)) idx = set(group) for", "\"\"\" family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable()", "= np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups)", "300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0)", "0.5 k = 3 ga = Gaussian() for gsize in 1,2,3: ix =", "= np.linalg.cholesky(cmat) endog = [] exog = [] groups = [] for i", "np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y =", "md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for", "(k+1) for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~", "np.dot(exog, beta) prob = 1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) <", "np.zeros((n, 1)) beta = np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, beta) prob", "= summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\")", "Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf = md.fit()", "np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog, group, None, family, dp) mdf1", "X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = 
mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues =", "import lrange import numpy as np import os from numpy.testing import assert_almost_equal from", "= np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() ve = Exchangeable()", "-1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi,", "idx = set(group) for ii in idx: jj = np.flatnonzero(group == ii) T[jj]", "GEE(endog, exog, group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog", "+ X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u =", "cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1", "np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300)", "id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u = coefficients(sme) cfe =", "2-end: exog variables If `icept` is True, an intercept is prepended to the", "ar = Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf", "mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family =", "== 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that the time defaults", "np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" %", "load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = 
np.array(group_n)[:,None]", "paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3 + X4", "and standard errors derived from the naive covariance estimate. \"\"\" vs = Independence()", "+ X3 + X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me)", "= np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct", "X2 = Z[,4] X3 = Z[,5] mi = gee(Y ~ X1 + X2", "decimal=10) def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog", "X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi", "mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi", "in the release announcement for version 0.6. def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname", "groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) #", "se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) #", "family=ga, cov_struct = ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self):", "Z[,5] mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\",", "= GEE(endog, exog, group, None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From", "= sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if", "np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with", "1)) beta = np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, 
beta) prob =", "np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20),", "paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se", "se[j], decimal=10) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D", "= paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei,", "X2 + X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j],", "group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L = np.r_[[[0,", "np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0,", "OLS for parameter estimates and standard errors derived from the naive covariance estimate.", "= np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\": X1,", "groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v = Independence() md", "np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D =", "with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y", "= GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf", "0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial() endog,", "[0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5", "load_data(\"gee_poisson_1.csv\") vi = 
Independence() ve = Exchangeable() # From R gee cf =", "np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2,", "format: Column 0: Group indicator Column 1: endog variable Columns 2-end: exog variables", "From statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se", "test_logistic(self): \"\"\" R code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y", "group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1)", "(1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex =", "D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~ X1", "X1 + X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog)", "0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3)", "1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param **", "X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family = Poisson()", "-1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v", "endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog,", "id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u = 
coefficients(smi) cfi =", "Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v", "0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705,", "decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog, groups =", "ex) mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary() # This is in", "= paste(u[,4], collapse=\",\") ma = gee(Y ~ X1 + X2 + X3, id=Id,", "0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = []", "None, family, dp) mdf1 = md.fit() # From statsmodels.GEE (not an independent test)", "for version 0.6. def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\")", "\"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1", "cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for", "in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param", "= md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\"", "md = GEE.from_formula(\"Y ~ X1 + X2 + X3 + X4 + X5\",", "= Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable() # From", "md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian", "Z[,7] mi = gee(Y ~ X1 + X2 + X3 + X4 +", "X1, \"X2\": X2, \"X3\": X3}) md = GEE.from_formula(\"Y ~ X1 + X2 +", "exactly. 
\"\"\" from __future__ import print_function from statsmodels.compat import lrange import numpy as", "X3 + X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit()", "[\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)):", "family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test", "np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups})", "mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas", "assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds ratio dependence", "md2 = GLM.from_formula(\"y ~ age + trt + base\", data, family=families.Poisson()) mdf2 =", "np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5)", "cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group_n[:,None],", "X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa", "results will not agree exactly. 
\"\"\" from __future__ import print_function from statsmodels.compat import", "assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for comparing", "cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\")", "X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u =", "GLM from statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age + trt +", "cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\"", "decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),", "np.linalg.cholesky(cmat) endog = [] exog = [] groups = [] for i in", "% (k+1) for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y", "exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v = Independence()", "= GEE.from_formula(\"Y ~ X1 + X2 + X3 + X4 + X5\", D,", "= Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog, group,", "Z[,4] X3 = Z[,5] mi = gee(Y ~ X1 + X2 + X3,", "md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params,", "Exchangeable() md = GEE(endog, exog, group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog,", "decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y", "gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, 
-1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]]", "load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog, group, None, family, ve) mdf", "family, dp) mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf", "mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code for comparing results: library(gee) Z =", "set should be a CSV file with the following format: Column 0: Group", "X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\": import nose", "family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10):", "group, time, fa, ex) mdf = md.fit() marg = GEEMargins(mdf, ()) marg.summary() #", "agree with the R GEE implementation for the independence and exchangeable correlation structures.", "np.zeros(len(endog)) idx = set(group) for ii in idx: jj = np.flatnonzero(group == ii)", "+\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L", "mdf = md.fit() cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886]", "Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns", "assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family = Poisson() Y =", "np.kron(np.arange(100), np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L = np.r_[[[0, 0, 0,", "assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial() endog, exog,", "cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test 
with formulas D = np.concatenate((endog[:,None], group[:,None],", "None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) #", "X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D,", "def test_poisson(self): \"\"\" library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id =", "exog = [] groups = [] for i in range(num_group): x = np.random.normal(size=(gsize,k))", "# TODO: why does this test fail? def t_est_missing(self): Y = np.random.normal(size=100) X1", "= 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100)", "Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as", "should be a CSV file with the following format: Column 0: Group indicator", "X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi)", "assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\",", "in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3 + X4", "fname), delimiter=\",\") group = Z[:,0] endog = Z[:,1] exog = Z[:,2:] if icept:", "num_group = 100 ar_param = 0.5 k = 3 ga = Gaussian() for", "= mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance)) assert_almost_equal(naive_tvalues, ols.tvalues,", "X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1],", "md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D[\"groups\"], missing='drop')", "+ [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for j,v in enumerate((vi,ve)): md", "0.97297106] se = np.r_[0.08629606, 
0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6)", "0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705, 0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params,", "= 1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group =", "95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that the time defaults work", "md1.fit() md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params,", "None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params),", "0.05995019, 0.0916574, 0.05951445, 0.08539281] assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family", "= coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me = gee(Y", "decimal=6) # Check for run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true", "group, None, family, ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an", "D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] +", "Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 =", "for ii in idx: jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family", "ii) T[jj] = lrange(len(jj)) family = Binomial() va = Autoregressive() md1 = GEE(endog,", "Z[,2] Id = Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5]", "md = GEE(endog, exog, group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params),", "X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2", "+ base\", 
data, groups=data[\"subject\"], cov_struct=ind, family=fam) mdf1 = md1.fit() # Coefficients should agree", "# Test with independence correlation v = Independence() md = GEE(endog, exog, groups,", "Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() # From R", "= md.fit() # From statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073 ,", "cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params,", "D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4)", "sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi", "R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662,", "axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k", "ga = Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix", "id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas", "= [] groups = [] for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x)", "gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat =", "ve = Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]]", "0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(),", 
"# From statsmodels.GEE (not an independent test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728]", "library(gee) Z = read.csv(\"results/gee_poisson_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1] X1 =", "From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se =", "Gaussian GEE with independence correlation should agree exactly with OLS for parameter estimates", "mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian() endog,exog,group", "the naive covariance estimate. \"\"\" vs = Independence() family = Gaussian() Y =", "va = Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit()", "import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" Load a data set from", "group, T, family, v) mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j],", "+ X3\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10)", "jj = np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() ve =", "sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4],", "Y[5:7] = np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\":", "X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe", "statsmodels.genmod import families md2 = GLM.from_formula(\"y ~ age + trt + base\", data,", "ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def", "[[%s],[%s]]\", sei, see) \"\"\" family = Gaussian() 
endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence()", "GEE(endog, exog, group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(),", "X4 = Z[,6] X5 = Z[,7] mi = gee(Y ~ X1 + X2", "test fail? def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100)", "= gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100)", "np.r_[1,1,1]) vi = Independence() ve = Exchangeable() L = np.r_[[[0, 0, 0, 1]]]", "exog, groups, None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not", "family=family, cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~ X1 + X2 +", "1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4)) time = np.zeros((n, 1)) beta =", "GEE(endog, exog, group, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(),", "os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam = Poisson() ind", "0.6. 
def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data =", "gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi)", "v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with", "Exchangeable() md = GEE(endog, exog, group, time, fa, ex) mdf = md.fit() marg", "coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi,", "+ X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family =", "= np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan X2[10:12]", "def load_data(fname, icept=True): \"\"\" Load a data set from the results directory. The", "= Z[,3] X2 = Z[,4] X3 = Z[,5] X4 = Z[,6] X5 =", "summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] params_true = [[1.08043787,", "= Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in range(endog.shape[0]//10): group_n.extend([0,]*5)", "True, an intercept is prepended to the exog variables. \"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__))", "this test fail? 
def t_est_missing(self): Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 =", "ne, dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf", "None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) #", "X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\": import nose nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],", "assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md = GEE(endog, exog,", "dp = Independence() md = GEE(endog, exog, group, None, family, dp) mdf1 =", "vi = Independence() ve = Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962,", "Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~ age + trt + base\",", "the results directory. The data set should be a CSV file with the", "= pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)]", "= [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md = GEE(endog,", "+ X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe", "def test_nominal(self): family = Multinomial(3) endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test", "summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma", "are to R. 
The statmodels GEE implementation should generally agree with the R", "family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test)", "4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4), np.ones(4))", "decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params / \\ np.sqrt(np.diag(mdf.naive_covariance))", "os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families", "0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee)", "= Z[,4] X3 = Z[,5] X4 = Z[,6] X5 = Z[,7] mi =", "= np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar = Autoregressive() md = GEE(endog, exog,", "assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues = mdf.params /", "R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108,", "np.dot(cmat_r, np.random.normal(size=gsize)) endog.append(expval + errors) groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog", "= os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0] endog =", "X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7] = np.nan", "Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = 
np.random.normal(size=100) X3 = np.random.normal(size=100)", "fam = Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~ age + trt", "-1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in enumerate((vi, ve)): md", "0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family, v)", "= Z[,5] mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,", "endog variable Columns 2-end: exog variables If `icept` is True, an intercept is", "Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog, group, None,", "Columns 2-end: exog variables If `icept` is True, an intercept is prepended to", "= gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"exchangeable\") sme =", "exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1, 0, 0.2])", "enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3 + X4 +", "to the exog variables. 
\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname),", "test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038,", "X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u =", "X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 + X2 +", "v) mdf = md.fit() if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j],", "- np.arange(gsize)[None,:] ix = np.abs(ix) cmat = ar_param ** ix cmat_r = np.linalg.cholesky(cmat)", "Independence() md = GEE(endog, exog, group, None, family, dp) mdf1 = md.fit() #", "correlation should agree exactly with OLS for parameter estimates and standard errors derived", "Nested) import pandas as pd import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\"", "-2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se,", "`icept` is True, an intercept is prepended to the exog variables. 
\"\"\" cur_dir", "family=binomial, corstr=\"exchangeable\") sme = summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see", "se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D", "family = Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3", "X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi = summary(mi) u", "cov_struct=vs) mdf = md.fit() ols = sm.ols(\"Y ~ X1 + X2 + X3\",", "= summary(me) u = coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\")", "from __future__ import print_function from statsmodels.compat import lrange import numpy as np import", "base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) #", "= GEE(endog, exog, group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues)", "sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values", "X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u =", "assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1)", "group, None, family, ve) mdf = md.fit() assert_almost_equal(np.dot(exog, mdf.params), mdf.fittedvalues) assert_almost_equal(endog - np.dot(exog,", "Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2 = Z[,4] X3", "assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog =", "vs = Independence() family = Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 =", "+ X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) 
sme = summary(me) u = coefficients(sme)", "= paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3, id=Id,", "0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md", "def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y = Z[,2] Id =", "groups.append(i*np.ones(gsize)) endog = np.concatenate(endog) groups = np.concatenate(groups) exog = np.concatenate(exog, axis=0) ar =", "= Autoregressive() md = GEE(endog, exog, groups, family=ga, cov_struct = ar) mdf =", "+ X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma)", "T[jj] = lrange(len(jj)) family = Binomial() ve = Exchangeable() vi = Independence() va", "< prob) fa = Binomial() ex = Exchangeable() md = GEE(endog, exog, group,", "X4 + X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme)", "= np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, beta) prob = 1 /", "\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group = Z[:,0]", "X5 = Z[,7] mi = gee(Y ~ X1 + X2 + X3 +", "cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4))", "Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] mi = gee(Y", "1.12709319, 0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param", "np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] assert_almost_equal(mdf2.params, cf, decimal=6) assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family", "Z[,5] X4 = Z[,6] X5 = Z[,7] mi = gee(Y ~ X1 +", "= np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991] 
assert_almost_equal(mdf2.params, cf,", "mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Check for run-time", "in range(endog.shape[0]//10): group_n.extend([0,]*5) group_n.extend([1,]*5) group_n = np.array(group_n)[:,None] dp = Independence() md = GEE(endog,", "file with the following format: Column 0: Group indicator Column 1: endog variable", "sma = summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea = paste(u[,4],", "D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",] +", "group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6)", "+ X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se, decimal=10) naive_tvalues", "import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import statsmodels.formula.api as", "~ age + trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params,", "X5, id=Id, family=poisson, corstr=\"exchangeable\", scale.fix=TRUE) sme = summary(me) u = coefficients(sme) cfe =", "= Independence() ve = Exchangeable() # From R gee cf = [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724,", "From statsmodels.GEE (not an independent test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2", "np.ones(5)) D = pd.DataFrame({\"Y\": Y, \"X1\": X1, \"X2\": X2, \"X3\": X3}) md =", "decimal=6) def test_logistic(self): \"\"\" R code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\",", "exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0, 0.589208623896, 0.559823804948] 
params_true", "np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self):", "exog variables. \"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname), delimiter=\",\") group", "collapse=\",\") sea = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se =", "family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values,", "coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") ma = gee(Y ~", "~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi =", "= GEE(endog, exog, group, None, family, dp) mdf1 = md.fit() # From statsmodels.GEE", "+ X2 + X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) ==", "md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this test", "\"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\",", "sei, see) \"\"\" family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve", "with the following format: Column 0: Group indicator Column 1: endog variable Columns", "ar) mdf = md.fit() assert_almost_equal(ar.dep_params, dep_params_true[gsize-1]) assert_almost_equal(mdf.params, params_true[gsize-1]) def test_post_estimation(self): family = Gaussian()", "test) cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 ,", "0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5 k = 3 ga", "Binomial() ex = Exchangeable() md = GEE(endog, exog, group, time, fa, ex) mdf", "assert_almost_equal(mdf1.params, mdf2.params, decimal=6) 
assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this test fail?", "formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",]", "+ X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10) se = mdf.standard_errors(covariance_type=\"naive\") assert_almost_equal(ols.bse, se,", "X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D =", "\"\"\" Test functions for GEE External comparisons are to R. The statmodels GEE", "test_compare_logit(self): vs = Independence() family = Binomial() Y = 1*(np.random.normal(size=100) < 0) X1", "X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j],", "mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does", "md = GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3],", "X3 = np.random.normal(size=100) groups = np.random.randint(0, 4, size=100) D = pd.DataFrame({\"Y\": Y, \"X1\":", "from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive,", "family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me) u = coefficients(sme) cfe = paste(u[,1],", "ind = Independence() md1 = GEE.from_formula(\"y ~ age + trt + base\", data,", "= md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that", "GEE implementation for the independence and exchangeable correlation structures. For other correlation structures,", "D.columns = [\"Y\",\"Id\",] + [\"X%d\" % (k+1) for k in range(exog.shape[1]-1)] for j,v", "the release announcement for version 0.6. 
def test_poisson_epil(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) fname =", "np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0) group = np.kron(np.arange(n/4),", "1 / (1 + np.exp(-lpr)) endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial()", "Binomial() va = Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1 =", "corstr=\"AR-M\") sma = summary(ma) u = coefficients(sma) cfa = paste(u[,1], collapse=\",\") sea =", "np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group, None, family, v,", "be a CSV file with the following format: Column 0: Group indicator Column", "test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n = [] for i in", "X2 = np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) D = pd.DataFrame({\"Y\":", "np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf,", "groups, None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an", "0: Group indicator Column 1: endog variable Columns 2-end: exog variables If `icept`", "Independence() md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf1 = md.fit()", "u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei = paste(u[,4], collapse=\",\") me =", "prob) fa = Binomial() ex = Exchangeable() md = GEE(endog, exog, group, time,", "= Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R = np.r_[0,] for j,v", "coefficients(sme) cfe = paste(u[,1], collapse=\",\") see = paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s]]\", cfi,", "header=FALSE) Y = Z[,2] Id = Z[,1] X1 = Z[,3] X2 = Z[,4]", "statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se", "- np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" 
library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE) Y", "ols = sm.ols(\"Y ~ X1 + X2 + X3\", data=D).fit() assert_almost_equal(ols.params.values, mdf.params, decimal=10)", "= [] exog = [] groups = [] for i in range(num_group): x", "md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) #", "from statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import", "\"groups\": groups}) md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None,", "= Z[,3] X2 = Z[,4] X3 = Z[,5] mi = gee(Y ~ X1", "import (GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import", "= GlobalOddsRatio(\"nominal\") md = GEE(endog, exog, groups, None, family, v) md.setup_nominal() mdf2 =", "Z[,5] mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\")", "Check for run-time exceptions in summary # print(mdf.summary()) def test_autoregressive(self): dep_params_true = [0,", "0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102, 0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in", "mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group =", "This is in the release announcement for version 0.6. 
def test_poisson_epil(self): cur_dir =", "X1 + X2 + X3, id=Id, family=binomial, corstr=\"AR-M\") sma = summary(ma) u =", "ve = Exchangeable() vi = Independence() va = Autoregressive() # From R gee", "X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit()", "cur_dir = os.path.dirname(os.path.abspath(__file__)) fname = os.path.join(cur_dir, \"results\", \"epil.csv\") data = pd.read_csv(fname) fam =", "0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md = GEE(endog, exog,", "endog = 1*(np.random.uniform(size=n) < prob) fa = Binomial() ex = Exchangeable() md =", "None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.logit(\"Y ~ X1 + X2 + X3\",", "-0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2,", "endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v = GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog,", "md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719,", "= [] for i in range(num_group): x = np.random.normal(size=(gsize,k)) exog.append(x) expval = x.sum(1)", "sei, see, sea) \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive", "statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se =", "Exchangeable() # From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se =", "independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969] se = np.r_[0.08632616, 0.02913582, 0.03114428,", "** ix cmat_r = np.linalg.cholesky(cmat) 
endog = [] exog = [] groups =", "paste(u[,4], collapse=\",\") sprintf(\"cf = [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see,", "family=family, cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self):", "families md2 = GLM.from_formula(\"y ~ age + trt + base\", data, family=families.Poisson()) mdf2", "\"\"\" Load a data set from the results directory. The data set should", "with independence correlation should agree exactly with OLS for parameter estimates and standard", "load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() # From R gee cf =", "1: endog variable Columns 2-end: exog variables If `icept` is True, an intercept", "[[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, -1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261, 0.0496045952071308,0.0479467597161284], [0.0440369906460754,0.0480069787567662, 0.049519758758187,0.0479760443027526]] for j,v in", "Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 = np.random.normal(size=100)", "data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs = Independence() family = Poisson() Y", "~ X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf =", "100 ar_param = 0.5 k = 3 ga = Gaussian() for gsize in", "0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(), se, decimal=6) ne = Nested() md = GEE(endog,", "for gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix = np.abs(ix) cmat", "independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653,", "GEE(endog, exog, groups, None, 
family, v) md.setup_nominal() mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE", "import pandas as pd import statsmodels.formula.api as sm def load_data(fname, icept=True): \"\"\" Load", "j,v in enumerate((vi,ve)): md = GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf", "family=binomial, corstr=\"independence\") smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei", "None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~ X1 + X2 + X3\",", "mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this test fail? def", "assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that the time", "tol=1e-8, maxit=100) smi = summary(mi) u = coefficients(smi) cfi = paste(u[,1], collapse=\",\") sei", "missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\"", "data set should be a CSV file with the following format: Column 0:", "= Binomial() ex = Exchangeable() md = GEE(endog, exog, group, time, fa, ex)", "family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog,", "+ X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y ~", "Independence() ve = Exchangeable() L = np.r_[[[0, 0, 0, 1]]] R = np.r_[0,]", "= np.nan Y[5:7] = np.nan X2[10:12] = np.nan D = pd.DataFrame({\"Y\": Y, \"X1\":", "GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131,", "Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] = 1 endog = np.dot(exog, np.r_[1, 1, 0,", "se, decimal=6) def test_ordinal(self): family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False)", "in enumerate((vi,ve)): md = GEE(endog, exog, group, None, family, v, constraint=(L,R)) mdf =", "X1 + X2 + X3 + X4 + X5\", D, None, 
groups=D.loc[:,\"Id\"], family=family,", "0, 1]]] R = np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog, exog,", "exog, groups, None, family, v) md.setup_ordinal() mdf = md.fit() cf = np.r_[1.09238131, 0.02148193,", "v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with", "exog = Z[:,2:] if icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class", "beta = np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, beta) prob = 1", "the time defaults work correctly. \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for", "paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,", "1.00467426, -2.01723004, 0.97297106] se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989] assert_almost_equal(mdf1.params, cf, decimal=6) assert_almost_equal(mdf1.standard_errors(),", "+ X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs) mdf = md.fit() ols", "comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id = Z[,1]", "family, v, constraint=(L,R)) mdf = md.fit() assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family =", "= np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def", "md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D =", "Check that the time defaults work correctly. \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time", "intercept is prepended to the exog variables. 
\"\"\" cur_dir = os.path.dirname(os.path.abspath(__file__)) Z =", "agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod import families md2 =", "assert(md.exog.shape) == (95,4) def test_default_time(self): \"\"\" Check that the time defaults work correctly.", "if id(v) != id(va): assert_almost_equal(mdf.params, cf[j], decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with", "GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable, Independence,", "endog,exog,group = load_data(\"gee_linear_1.csv\") ve = Exchangeable() md = GEE(endog, exog, group, None, family,", "~ X1 + X2 + X3, id=Id, family=binomial, corstr=\"independence\") smi = summary(mi) u", "formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1) D = pd.DataFrame(D) D.columns = [\"Y\",\"Id\",]", "load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation v = Independence() md = GEE(endog,", "~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) def test_compare_poisson(self): vs", "collapse=\",\") me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\",", "md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog", "= np.random.normal(size=100) X3 = np.random.normal(size=100) groups = np.kron(lrange(20), np.ones(5)) Y[0] = np.nan Y[5:7]", "= Independence() va = Autoregressive() # From R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333],", "0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param = 0.5 k =", "1*(np.random.uniform(size=n) < prob) fa = Binomial() ex = Exchangeable() md = GEE(endog, exog,", "md.fit() 
assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D =", "agree exactly. \"\"\" from __future__ import print_function from statsmodels.compat import lrange import numpy", "= md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6) # TODO: why does this", "Test with global odds ratio dependence v = GlobalOddsRatio(\"nominal\") md = GEE(endog, exog,", "assert_almost_equal(endog - np.dot(exog, mdf.params), mdf.resid) def test_linear(self): \"\"\" library(gee) Z = read.csv(\"results/gee_linear_1.csv\", header=FALSE)", "= md.fit() assert_almost_equal(mdf.params, cf[j], decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) # Test with formulas D", "X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE) smi =", "= Gaussian() for gsize in 1,2,3: ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:] ix =", ", 0.09025019] assert_almost_equal(mdf2.params, cf2, decimal=5) assert_almost_equal(mdf2.standard_errors(), se2, decimal=5) def test_poisson(self): \"\"\" library(gee) Z", "structures. 
For other correlation structures, the details of the correlation estimation differ among", "as np import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations import (GEE, GEEMargins,", "ar_param ** ix cmat_r = np.linalg.cholesky(cmat) endog = [] exog = [] groups", "X1 + X2 + X3\", D, None, groups=groups, family=family, cov_struct=vs).fit() sml = sm.poisson(\"Y", "assert_almost_equal(mdf.params, cf, decimal=5) assert_almost_equal(mdf.bse, se, decimal=5) def test_nominal(self): family = Multinomial(3) endog, exog,", "sm.poisson(\"Y ~ X1 + X2 + X3\", data=D).fit(disp=False) assert_almost_equal(sml.params.values, md.params, decimal=10) if __name__==\"__main__\":", "md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf1 =", "test) cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728] se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553]", "decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group_n[:,None], exog[:,1:]), axis=1) D =", "estimation differ among implementations and the results will not agree exactly. \"\"\" from", "naive covariance estimate. 
\"\"\" vs = Independence() family = Gaussian() Y = np.random.normal(size=100)", "exog, group_n, None, family, v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j],", "decimal=6) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),", "md1.fit() # Coefficients should agree with GLM from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod", "Y, \"X1\": X1, \"X2\": X2, \"X3\": X3, \"groups\": groups}) md = GEE.from_formula(\"Y ~", "= Autoregressive() md1 = GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2", "= md.fit() # From statsmodels.GEE (not an independent test) cf1 = np.r_[0.44944752, 0.45569985,", "(GEE, GEEMargins, Multinomial) from statsmodels.genmod.families import Gaussian, Binomial, Poisson from statsmodels.genmod.dependence_structures import (Exchangeable,", "= Independence() md = GEE(endog, exog, group, None, family, dp) mdf1 = md.fit()", "= [[-0.0364450410793481,-0.0543209391301178, 0.0156642711741052,0.57628591338724, -0.00465659951186211,-0.477093153099256], [-0.0315615554826533,-0.0562589480840004, 0.0178419412298561,0.571512795340481, -0.00363255566297332,-0.475971696727736]] se = [[0.0611309237214186,0.0390680524493108, 0.0334234174505518,0.0366860768962715, 0.0304758505008105,0.0316348058881079], [0.0610840153582275,0.0376887268649102,", "np.r_[0, 1, -1, 0.5] lpr = np.dot(exog, beta) prob = 1 / (1", "icept: exog = np.concatenate((np.ones((exog.shape[0],1)), exog), axis=1) return endog,exog,group class TestGEE(object): def test_margins(self): n", "groups, None, family, v) md.setup_nominal() mdf1 = md.fit() # From statsmodels.GEE (not an", "+ X2 + X3, id=Id, family=gaussian, corstr=\"independence\", tol=1e-8, maxit=100) smi = summary(mi) u", "md1 = GEE.from_formula(\"y ~ age + trt + base\", data, groups=data[\"subject\"], cov_struct=ind, 
family=fam)", "assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0] =", "assert_almost_equal(mdf2.standard_errors(), se, decimal=6) def test_ordinal(self): family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\",", "endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\\ np.random.normal(size=300) group = np.kron(np.arange(100), np.r_[1,1,1])", "print(mdf.params) def test_compare_OLS(self): \"\"\" Gaussian GEE with independence correlation should agree exactly with", "exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1] = 1*(exog[:,2] < 0) group", "see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve =", "= Poisson() Y = np.ceil(-np.log(np.random.uniform(size=100))) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 =", "Z[,3] X2 = Z[,4] X3 = Z[,5] mi = gee(Y ~ X1 +", "= Z[,1] X1 = Z[,3] X2 = Z[,4] X3 = Z[,5] mi =", "np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() va = Autoregressive() md1", "decimal=10) assert_almost_equal(mdf.standard_errors(), se[j], decimal=10) def test_linear_constrained(self): family = Gaussian() exog = np.random.normal(size=(300,4)) exog[:,0]", "def test_margins(self): n = 300 exog = np.random.normal(size=(n, 4)) exog[:,0] = 1 exog[:,1]", "lrange import numpy as np import os from numpy.testing import assert_almost_equal from statsmodels.genmod.generalized_estimating_equations", "X1 + X2 + X3 + X4 + X5, id=Id, family=poisson, corstr=\"independence\", scale.fix=TRUE)", "md = GEE(endog, exog, group, time, fa, ex) mdf = md.fit() marg =", "= [[0.127291720283049,0.166725808326067, 0.192430061340865,0.173141068839597], [0.127045031730155,0.165470678232842, 0.192052750030501,0.173174779369249], [0.127240302296444,0.170554083928117, 0.191045527104503,0.169776150974586]] for j,v in enumerate((vi,ve,va)): md =", "= Multinomial(3) 
endog, exog, groups = load_data(\"gee_nominal_1.csv\", icept=False) # Test with independence correlation", "family = Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve = Exchangeable() #", "0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global odds", "as sm def load_data(fname, icept=True): \"\"\" Load a data set from the results", "agree exactly with OLS for parameter estimates and standard errors derived from the", "= Gaussian() Y = np.random.normal(size=100) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100) X3 =", "groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) == (95,4) def test_default_time(self):", "R gee cf = [[0.0167272965285882,1.13038654425893, -1.86896345082962,1.09397608331333], [0.0178982283915449,1.13118798191788, -1.86133518416017,1.08944256230299], [0.0109621937947958,1.13226505028438, -1.88278757333046,1.09954623769449]] se = [[0.127291720283049,0.166725808326067,", "for j,v in enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3", "code for comparing results: library(gee) Z = read.csv(\"results/gee_logistic_1.csv\", header=FALSE) Y = Z[,2] Id", "# From statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969]", "def test_ordinal(self): family = Binomial() endog, exog, groups = load_data(\"gee_ordinal_1.csv\", icept=False) v =", "= pd.read_csv(fname) fam = Poisson() ind = Independence() md1 = GEE.from_formula(\"y ~ age", "GlobalOddsRatio(\"ordinal\") md = GEE(endog, exog, groups, None, family, v) md.setup_ordinal() mdf = md.fit()", "enumerate((vi,ve)): md = GEE.from_formula(\"Y ~ X1 + X2 + X3\", D, None, groups=D.loc[:,\"Id\"],", "# From R gee cf = [[-0.01850226507491,0.81436304278962, -1.56167635393184,0.794239361055003], [-0.0182920577154767,0.814898414022467, 
-1.56194040106201,0.793499517527478]] se = [[0.0440733554189401,0.0479993639119261,", "cov_struct=v) mdf = md.fit() assert_almost_equal(mdf.params, cf[j], decimal=5) assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # print(mdf.params) def", "R. The statmodels GEE implementation should generally agree with the R GEE implementation", "parameter estimates and standard errors derived from the naive covariance estimate. \"\"\" vs", "cfe) sprintf(\"se = [[%s],[%s]]\", sei, see) \"\"\" family = Poisson() endog,exog,group_n = load_data(\"gee_poisson_1.csv\")", "= md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self): \"\"\" R code", "work correctly. \"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model", "dp) mdf1 = md.fit() # From statsmodels.GEE (not an independent test) cf =", "statsmodels.genmod.dependence_structures import (Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested) import pandas as pd import statsmodels.formula.api", "\"\"\" endog,exog,group = load_data(\"gee_logistic_1.csv\") # Time values for the autoregressive model T =", "assert_almost_equal(mdf.standard_errors(), se[j], decimal=6) # Test with formulas D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]), axis=1)", "+ X2 + X3 + X4 + X5\", D, None, groups=D.loc[:,\"Id\"], family=family, cov_struct=v)", "lrange(len(jj)) family = Binomial() va = Autoregressive() md1 = GEE(endog, exog, group, family=family,", "dep_data=group_n) mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf =", "0.07718842, 0.13229421, 0.08544553] assert_almost_equal(mdf1.params, cf1, decimal=5) assert_almost_equal(mdf1.standard_errors(), se1, decimal=5) # Test with global", "endog,exog,group_n = load_data(\"gee_poisson_1.csv\") vi = Independence() ve = Exchangeable() # From R gee", "= 
np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943] se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019] assert_almost_equal(mdf2.params,", "GEE with independence correlation should agree exactly with OLS for parameter estimates and", "correlation v = Independence() md = GEE(endog, exog, groups, None, family, v) md.setup_nominal()", "= Binomial() Y = 1*(np.random.normal(size=100) < 0) X1 = np.random.normal(size=100) X2 = np.random.normal(size=100)", "mdf2 = md.fit(start_params=mdf1.params) # From statsmodels.GEE (not an independent test) cf = np.r_[-0.16655319,", "load_data(fname, icept=True): \"\"\" Load a data set from the results directory. The data", "R = np.r_[0,] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group, None,", "sei = paste(u[,4], collapse=\",\") me = gee(Y ~ X1 + X2 + X3", "X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape) ==", "statsmodels.compat import lrange import numpy as np import os from numpy.testing import assert_almost_equal", "in enumerate((vi, ve)): md = GEE(endog, exog, group, None, family, v) mdf =", "a CSV file with the following format: Column 0: Group indicator Column 1:", "trt + base\", data, family=families.Poisson()) mdf2 = md2.fit(scale=\"X2\") assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.scale, mdf2.scale,", "family=family, cov_struct=va) mdf2 = md2.fit() assert_almost_equal(mdf1.params, mdf2.params, decimal=6) assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(), decimal=6) def test_logistic(self):", "+ X3\", D, None, groups=D[\"groups\"], missing='drop') mdf = md.fit() assert(len(md.endog) == 95) assert(md.exog.shape)", "0.90133927], [0.9613677, 1.05826987, 0.90832055], [1.05370439, 0.96084864, 0.93923374]] np.random.seed(342837482) num_group = 100 ar_param =", "= np.flatnonzero(group == ii) T[jj] = lrange(len(jj)) family = Binomial() va = Autoregressive()", "see) \"\"\" family = 
Gaussian() endog,exog,group = load_data(\"gee_linear_1.csv\") vi = Independence() ve =", "= np.array(group_n)[:,None] dp = Independence() md = GEE(endog, exog, group, None, family, dp)", "0.0325168379415177,0.0369786751362213, 0.0296141014225009,0.0306115470200955]] for j,v in enumerate((vi,ve)): md = GEE(endog, exog, group_n, None, family,", "From statsmodels.GEE (not an independent test) cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106]", "data set from the results directory. The data set should be a CSV", "derived from the naive covariance estimate. \"\"\" vs = Independence() family = Gaussian()", "X1 + X2 + X3, id=Id, family=gaussian, corstr=\"exchangeable\", tol=1e-8, maxit=100) sme = summary(me)", "= np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666, 0.02983409, 1.18123172, 0.01845318, -1.10233886] se = np.r_[0.10878752, 0.10326078,", "assert_almost_equal(mdf.params[3], 0, decimal=10) def test_nested_linear(self): family = Gaussian() endog,exog,group = load_data(\"gee_nested_linear_1.csv\") group_n =", "= [[%s],[%s],[%s]]\", cfi, cfe, cfa) sprintf(\"se = [[%s],[%s],[%s]]\", sei, see, sea) \"\"\" endog,exog,group", "GEE(endog, exog, group, family=family, cov_struct=va) mdf1 = md1.fit() md2 = GEE(endog, exog, group," ]
[ "-*- coding: utf-8 -*- # JN 2014-10-21 # script creates a clinRecConv.py from", "# script creates a clinRecConv.py from ncs files import os import numpy as", "matplotlib.dates import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing')", "from matplotlib.dates import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing", "creates a clinRecConv.py from ncs files import os import numpy as np from", "else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts = fid.read(0,", "import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else:", "NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts = fid.read(0, 1, 'timestep') np.save('clinRecConv',", "# JN 2014-10-21 # script creates a clinRecConv.py from ncs files import os", "exists, doing nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d)", "d = fid.header['opened'] n = date2num(d) ts = fid.read(0, 1, 'timestep') np.save('clinRecConv', np.array((ts,", "import NcsFile from matplotlib.dates import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File", "if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened']", "ncs files import os import numpy as np from combinato import NcsFile from", "clinRecConv.py from ncs files import os import numpy as np from combinato import", "import os import numpy as np from combinato import NcsFile from matplotlib.dates import", "__name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid = NcsFile('CSC1.ncs')", "os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n", "\"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, 
doing nothing') else: fid = NcsFile('CSC1.ncs') d =", "combinato import NcsFile from matplotlib.dates import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'):", "fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts = fid.read(0, 1,", "JN 2014-10-21 # script creates a clinRecConv.py from ncs files import os import", "NcsFile from matplotlib.dates import date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists,", "= fid.header['opened'] n = date2num(d) ts = fid.read(0, 1, 'timestep') np.save('clinRecConv', np.array((ts, d)))", "if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid =", "os import numpy as np from combinato import NcsFile from matplotlib.dates import date2num", "from combinato import NcsFile from matplotlib.dates import date2num if __name__ == \"__main__\": if", "doing nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts", "nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts =", "date2num if __name__ == \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid", "as np from combinato import NcsFile from matplotlib.dates import date2num if __name__ ==", "# -*- coding: utf-8 -*- # JN 2014-10-21 # script creates a clinRecConv.py", "-*- # JN 2014-10-21 # script creates a clinRecConv.py from ncs files import", "import numpy as np from combinato import NcsFile from matplotlib.dates import date2num if", "np from combinato import NcsFile from matplotlib.dates import date2num if __name__ == \"__main__\":", "script creates a clinRecConv.py from ncs files import os import numpy as np", "files import os import numpy as np from combinato import NcsFile from matplotlib.dates", "coding: utf-8 -*- # JN 2014-10-21 # script creates a clinRecConv.py from ncs", "<reponame>s-mackay/combinato # -*- coding: utf-8 -*- # JN 2014-10-21 # 
script creates a", "a clinRecConv.py from ncs files import os import numpy as np from combinato", "utf-8 -*- # JN 2014-10-21 # script creates a clinRecConv.py from ncs files", "= NcsFile('CSC1.ncs') d = fid.header['opened'] n = date2num(d) ts = fid.read(0, 1, 'timestep')", "print('File exists, doing nothing') else: fid = NcsFile('CSC1.ncs') d = fid.header['opened'] n =", "from ncs files import os import numpy as np from combinato import NcsFile", "== \"__main__\": if os.path.exists('clinRecConv.py'): print('File exists, doing nothing') else: fid = NcsFile('CSC1.ncs') d", "2014-10-21 # script creates a clinRecConv.py from ncs files import os import numpy", "numpy as np from combinato import NcsFile from matplotlib.dates import date2num if __name__" ]
[ "('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),", "max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), ]", "2021-07-29 15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('company',", "] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField(", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ]", "'0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ),", "name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),", "migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True, max_length=255,", "[ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( 
model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True,", "dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True,", "Generated by Django 2.2.20 on 2021-07-29 15:26 from django.db import migrations, models class", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations =", "model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State", "migrations, models class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations = [", "operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company',", "Django 2.2.20 on 2021-07-29 15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ),", "15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'),", "by Django 2.2.20 on 2021-07-29 15:26 from django.db import migrations, models class Migration(migrations.Migration):", "# Generated by Django 2.2.20 on 2021-07-29 15:26 from django.db import migrations, models", "models class 
Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField(", "= [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'), ), migrations.AlterField( model_name='company', name='registered_address_area_abbrev_name',", "class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company',", "Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name',", "on 2021-07-29 15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "= [ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255,", "[ ('company', '0019_auto_20210512_1114'), ] operations = [ migrations.AlterField( model_name='company', name='address_area_abbrev_name', field=models.CharField(blank=True, max_length=255, verbose_name='State", "2.2.20 on 2021-07-29 15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('company', '0019_auto_20210512_1114'), ] operations" ]
[ "import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50", "k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else:", "di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t", "''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s) print((t+x_analytic_solution-di_1_s-Hs)**2+\\ (d_n_plus_1_s-t-x_analytic_solution-Hs)**2)", "n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) 
n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s) print((t+x_analytic_solution-di_1_s-Hs)**2+\\ (d_n_plus_1_s-t-x_analytic_solution-Hs)**2) print((t+x_analytic_solution-di_1_s-Hs)**2+(a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb-t-x_analytic_solution-Hs)**2+M_1*n_analytic_v_1+M_2*n_analytic_v_2)", "random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600", "''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n)", "x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) '''", "beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s 
h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) '''", "x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2)", "matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10", "M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000", "import numpy as np import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 M_1=100000000000000", "if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm", "a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 
phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s", "tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs", "c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs:", "Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs:", "x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s)", "theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: 
x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution)", "plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60", "phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t", "if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k)", "lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if", "t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) 
n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb", "0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm =", "c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if", "import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02", "h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0", "numpy as np import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 
M_1=100000000000000 a_n_plus_1s=2500", "np import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4", "zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62", "d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s) print((t+x_analytic_solution-di_1_s-Hs)**2+\\ (d_n_plus_1_s-t-x_analytic_solution-Hs)**2) print((t+x_analytic_solution-di_1_s-Hs)**2+(a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb-t-x_analytic_solution-Hs)**2+M_1*n_analytic_v_1+M_2*n_analytic_v_2) print('busload',l_n_1-beta_n_1,(a_n_plus_1s-(t+x_analytic_solution))*lambda_s*(1+tb*lambda_s),busload_n_plus_1,phi_n+x_analytic_solution*lambda_s) print(d_n_plus_1_s-t-x_analytic_solution,t+x_analytic_solution-di_1_s)", "print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) 
print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s) print((t+x_analytic_solution-di_1_s-Hs)**2+\\", "l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2)))", "x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s)", "n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2) #print(d_n_plus_1_s) print((t+x_analytic_solution-di_1_s-Hs)**2+\\ (d_n_plus_1_s-t-x_analytic_solution-Hs)**2) 
print((t+x_analytic_solution-di_1_s-Hs)**2+(a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb-t-x_analytic_solution-Hs)**2+M_1*n_analytic_v_1+M_2*n_analytic_v_2) print('busload',l_n_1-beta_n_1,(a_n_plus_1s-(t+x_analytic_solution))*lambda_s*(1+tb*lambda_s),busload_n_plus_1,phi_n+x_analytic_solution*lambda_s)", "else: x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\", "as plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60", "t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2))) ''' if t<di_1_s+Hs: if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs: x_analytic_solution=di_1_s+Hs-t else:", "as np import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5", "else: x_analytic_solution=0 print(x_analytic_solution) ''' n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n) n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k) 
d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2) busload_n_plus_1=l_n_1-beta_n_1+\\ (beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s) print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2)", "ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500 di_1_s=1000 k=1+tb*lambda_s h=(1+tb*lambda_s)*tb", "M_2=100000000000 M_1=100000000000000 a_n_plus_1s=2500 ta=1.5 tb=4 l_n_1=50 beta_n_1=10 c_n_plus_1=60 c_n=60 lambda_s=0.02 Hs=600 phi_n=62 t=1500", "<reponame>KGkiotsalitis/bus-holding-model-under-capacity-limitations<gh_stars>1-10 import numpy as np import matplotlib.pyplot as plt import random zeta=300 M_2=100000000000" ]
[ "= \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name) if", "from flask import Flask, render_template app = Flask(__name__) @app.route(\"/\") def index(): headline =", "= Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\")", "Flask, render_template app = Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello World\" return", "Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def", "\"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name) if __name__", "return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name) if __name__ == \"__main__\":", "def index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return", "@app.route(\"/\") def index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name):", "app = Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline)", "flask import Flask, render_template app = Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello", "headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name)", "import Flask, render_template app = Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello 
World\"", "render_template app = Flask(__name__) @app.route(\"/\") def index(): headline = \"Hello World\" return render_template(\"index.html\",", "index(): headline = \"Hello World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\",", "render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name) if __name__ == \"__main__\": app.run(debug=True)", "World\" return render_template(\"index.html\", headline=headline) @app.route(\"/<string:name>\") def say_name(name): return render_template(\"index.html\", name=name) if __name__ ==" ]
[ "\"\"\"Authenticates SITE_NAME with Reddit. Sets self.reddit and self.username on success. Parameters ---------- max_attempts", "as {}...\".format(self.site_name)) self.reddit = praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return", "cache file\") mem_cache = deque([], CACHE_SIZE) return mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes", "\"\"\" return True if ( not target.is_root and target.parent().author == self.username ) else", "object or reddit.comment object Target Reddit submission or comment. comment : str Comment", "keyword trigger, and excludes processed comments. Returns ------- generator Dict of reddit.Comment and", "int, optional Maximum number of comments to retrieve at a time. Defaults to", "adding \\n to end of file so that we don't get empty #", "Used to prevent infinite reply loop caused by another bot. Parameters ---------- target", "target submission or comment. Parameters ---------- target : reddit.submission object or reddit.comment object", "does not exist. Parameters ---------- file : str Location of cache file. Returns", "return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error) print(\"Retrying in {} \"", "to target submission or comment. Parameters ---------- target : reddit.submission object or reddit.comment", "error) print(\"Retrying in {} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt += 1 raise RuntimeError('Failed to", "yield {'comment': comment, 'query' : query.group(1)} except praw.exceptions.APIException as error: print(\"API Error:\", error)", "return mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file, converting list to", "deque(cache.split('\\n'), CACHE_SIZE) print(\"Cache loaded.\") except FileNotFoundError: print(\"Cache file not found.\") print(\"Creating cache directory...\")", "file. 
Creates cache file if does not exist. Parameters ---------- file : str", "of target is from bot. False otherwise. \"\"\" try: # implement replace_more()? target.refresh()", "print(\"Unable to create cache file\") mem_cache = deque([], CACHE_SIZE) return mem_cache @staticmethod def", "comment caching, and comment posting. Allows bot authors to concentrate on writing their", "raise RuntimeError('Failed to authenticate after {} ' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from", "bot functions. \"\"\" from collections import deque from os import mkdir import re", "to config.RETRIEVAL_LIMIT. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits to retrieve comments", "same request. Parameters ---------- target : reddit.comment object Target Reddit comment. Returns -------", "loaded print(\"Cache saved\") except IndexError: print(\"No items in cache\") except IOError as error:", "implement replace_more()? target.refresh() for reply in target.replies.list(): if reply.author == self.username: print(\"Comment already", "we don't get empty # entries in deque when next loaded print(\"Cache saved\")", "IOError as error: print(error) print(\"Unable to create cache file\") mem_cache = deque([], CACHE_SIZE)", "for reply in target.replies.list(): if reply.author == self.username: print(\"Comment already processed.\") return True", "is from self. Used to prevent infinite reply loop caused by another bot.", "failure. Defaults to -1 (infinite attempts). seconds_between_attempts : int, optional Seconds to wait", "import mkdir import re import signal import sys from time import sleep import", "self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None self.retrieval_limit = retrieval_limit self.site_name", "---------- file : str Location of cache file. 
#!/usr/bin/env python3
"""Reddit Bot Common Routines

Contains common Reddit bot functions such as keyword comment retrieval,
processed comment caching, and comment posting.

Allows bot authors to concentrate on writing their custom bot functions.
"""
from collections import deque
from os import makedirs
import re
import signal
import sys
from time import sleep

import praw

from config import (
    CACHE_FILE,
    CACHE_SIZE,
    KEYWORD,
    RETRIEVAL_LIMIT,
    SITE_NAME,
    SUBREDDITS,
)


class RedditBot:
    """Superclass for Reddit bots which adds common bot routines.

    Parameters
    ----------
    site_name : str, optional
        Initializes praw under site_name within praw.ini.
        Defaults to config.SITE_NAME.
        See: https://praw.readthedocs.io/en/latest/getting_started
        /configuration/prawini.html#choosing-a-site
    keyword : str, optional
        Comment trigger word. Defaults to config.KEYWORD.
    retrieval_limit : int, optional
        Maximum number of comments to retrieve at a time.
        Defaults to config.RETRIEVAL_LIMIT.
        See: https://praw.readthedocs.io/en/latest/code_overview/models
        /subreddit.html#praw.models.Subreddit.comments
    subreddits : str, optional
        Subreddits to retrieve comments from. Defaults to config.SUBREDDITS.
        See: https://praw.readthedocs.io/en/latest/code_overview/models
        /subreddit.html#subreddit
    """

    def __init__(
            self,
            site_name=SITE_NAME,
            keyword=KEYWORD,
            retrieval_limit=RETRIEVAL_LIMIT,
            subreddits=SUBREDDITS,
    ):
        print("Initializing bot...")
        # Captures the text following the trigger keyword, case-insensitive.
        self.keyword = re.compile(keyword+r' ([ \w]+)', re.I)
        self.reddit = None  # set by authenticate()
        self.retrieval_limit = retrieval_limit
        self.site_name = site_name
        self.subreddits = subreddits
        # Placeholder until authenticate() replaces it with the real user.
        self.username = site_name
        self.processed_comments = self.read_cache(CACHE_FILE)
        # Persist the processed-comment cache on Ctrl-C.
        signal.signal(signal.SIGINT, self.bot_exit)

    def authenticate(self, max_attempts=-1, seconds_between_attempts=60):
        """Authenticates SITE_NAME with Reddit.

        Sets self.reddit and self.username on success.

        Parameters
        ----------
        max_attempts : int, optional
            Maximum number of authentication attempts before failure.
            Defaults to -1 (infinite attempts).
        seconds_between_attempts : int, optional
            Seconds to wait between authentication attempts. Defaults to 60.

        Raises
        ------
        RuntimeError
            If authentication did not succeed within max_attempts attempts.
        """
        attempt = 0
        while attempt != max_attempts:
            try:
                print("Authenticating as {}...".format(self.site_name))
                self.reddit = praw.Reddit(self.site_name)
                self.username = self.reddit.user.me()
                print("Successfully authenticated as {}".format(self.username))
                return
            # NOTE(review): bad credentials typically surface as prawcore
            # exceptions (e.g. OAuthException), which APIException does not
            # cover -- confirm the intended failure modes before relying on
            # the retry loop.
            except praw.exceptions.APIException as error:
                print("Unable to authenticate:", error)
                print("Retrying in {} "
                      "seconds".format(seconds_between_attempts))
                sleep(seconds_between_attempts)
                attempt += 1
        raise RuntimeError('Failed to authenticate after {} '
                           'attempts'.format(max_attempts))

    def retrieve_comments(self):
        """Retrieves comments from subreddits, filters for keyword trigger,
        and excludes processed comments.

        Returns
        -------
        generator
            Dict of reddit.Comment and query.
        """
        try:
            print("Retrieving {} comments...".format(self.retrieval_limit))
            comments = self.reddit.subreddit(self.subreddits).comments(
                limit=self.retrieval_limit
            )
            for comment in comments:
                # Compare ids, not Comment objects: the cache holds the
                # plain id strings appended below (and loaded by read_cache).
                if (comment.author != self.username
                        and comment.id not in self.processed_comments
                        #and not self.has_already_replied(comment)
                        #and not self.is_summon_chain(comment)
                   ):
                    query = self.keyword.search(comment.body.lower())
                    if query:
                        self.processed_comments.append(comment.id)
                        yield {'comment': comment, 'query': query.group(1)}
        except praw.exceptions.APIException as error:
            print("API Error:", error)
            raise
        except AttributeError as error:
            # self.reddit is still None when authenticate() was never called.
            print(error)
            print("Unable to retrieve comments.")
            raise

    def submit_comment(self, target, comment):
        """Submit comment to target submission or comment.

        Parameters
        ----------
        target : reddit.submission object or reddit.comment object
            Target Reddit submission or comment.
        comment : str
            Comment to post.

        Returns
        -------
        object
            reddit.comment of newly created comment.
        """
        try:
            # Never reply to ourselves; returns None in that case.
            if target.author != self.username:
                print("Posting reply...")
                return target.reply(comment)
        except praw.exceptions.APIException as error:
            print("API Error:", error)
            raise

    @staticmethod
    def read_cache(file):
        """Opens and reads file, converting contents to a newline-separated
        list. Creates cache directory if it does not exist.

        Parameters
        ----------
        file : str
            Location of cache file.

        Returns
        -------
        collections.deque
            Contents of cache file, limited to config.CACHE_SIZE
        """
        try:
            print("Loading cache file into memory...")
            with open(file, 'r') as data:
                cache = data.read()
            mem_cache = deque(cache.split('\n'), CACHE_SIZE)
            print("Cache loaded.")
        except FileNotFoundError:
            print("Cache file not found.")
            print("Creating cache directory...")
            try:
                # Create all parent directories in one call; unlike a
                # per-segment mkdir() loop this does not fail when a
                # directory already exists.
                directory = '/'.join(file.split('/')[:-1])
                if directory:
                    makedirs(directory, exist_ok=True)
                print("Cache directory created.")
            except IOError as error:
                print(error)
                print("Unable to create cache file")
            mem_cache = deque([], CACHE_SIZE)
        return mem_cache

    @staticmethod
    def write_cache(file, mem_cache):
        """Writes list into file, converting list to newline-separated
        contents. Overwrites original cache file. Creates cache file if
        it does not exist.

        Parameters
        ----------
        file : str
            Location of cache file.
        mem_cache : list or deque
            Items in memory cache
        """
        try:
            print("Saving memory into cache file...")
            with open(file, 'w') as cache_file:
                if mem_cache:
                    # Join without a trailing '\n' so that we don't get empty
                    # entries in deque when next loaded. Unlike a popleft()
                    # loop, this leaves mem_cache intact for the caller.
                    cache_file.write('\n'.join(mem_cache))
                    print("Cache saved")
                else:
                    print("No items in cache")
        except IOError as error:
            print(error)
            print("Unable to create cache file")

    def bot_exit(self, *args, **kwargs):
        """Saves self.processed_comments into cache file before exiting."""
        # pylint: disable=unused-argument
        print("\nStopping bot...")
        self.write_cache(CACHE_FILE, self.processed_comments)
        print("Bot stopped")
        sys.exit()

    def is_summon_chain(self, target):
        """Checks if parent comment of target is from self.

        Used to prevent infinite reply loop caused by another bot.

        Parameters
        ----------
        target : reddit.comment object
            Target Reddit comment.

        Returns
        -------
        bool
            True if parent comment of target is from bot. False otherwise.
        """
        # Top-level comments have no parent comment to inspect.
        return not target.is_root and target.parent().author == self.username

    def has_already_replied(self, target):
        """Checks if target comment has already been replied by bot.

        Used to prevent multiple replies to the same request.

        Parameters
        ----------
        target : reddit.comment object
            Target Reddit comment.

        Returns
        -------
        bool
            True if target comment was already answered by bot.
            False otherwise.
        """
        try:
            # implement replace_more()?
            target.refresh()
            for reply in target.replies.list():
                if reply.author == self.username:
                    print("Comment already processed.")
                    return True
            print("Processing comment...")
            return False
        except praw.exceptions.APIException as error:
            print("API Error:", error)
            # Failsafe: claim "already replied" so we never double-post.
            return True
\"\"\" from collections import deque from os", ": list or deque Items in memory cache \"\"\" try: print(\"Saving memory into", "as error: print(error) print(\"Unable to retrieve comments.\") raise def submit_comment(self, target, comment): \"\"\"Submit", "sys from time import sleep import praw from config import ( CACHE_FILE, CACHE_SIZE,", "query.group(1)} except praw.exceptions.APIException as error: print(\"API Error:\", error) raise except AttributeError as error:", "target.parent().author == self.username ) else False def has_already_replied(self, target): \"\"\"Checks if target comment", "keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit", "error: print(error) print(\"Unable to create cache file\") mem_cache = deque([], CACHE_SIZE) return mem_cache", "submission or comment. comment : str Comment to post. Returns ------- object reddit.comment", "else False def has_already_replied(self, target): \"\"\"Checks if target comment has already been replied", ") else False def has_already_replied(self, target): \"\"\"Checks if target comment has already been", "\"\"\" try: print(\"Retrieving {} comments...\".format(self.retrieval_limit)) comments = self.reddit.subreddit(self.subreddits).comments( limit=self.retrieval_limit ) for comment in", "{}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error) print(\"Retrying in {}", "self.username: print(\"Comment already processed.\") return True print(\"Processing comment...\") return False except praw.exceptions.APIException as", "multiple replies to the same request. Parameters ---------- target : reddit.comment object Target", "in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n to end of file so that", "file if does not exist. 
Parameters ---------- file : str Location of cache", "print(\"Unable to retrieve comments.\") raise def submit_comment(self, target, comment): \"\"\"Submit comment to target", "import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass for", "subreddits : str, optional Subreddits to retrieve comments from. Defaults to config.SUBREDDITS. See:", "comments from subreddits, filters for keyword trigger, and excludes processed comments. Returns -------", "False otherwise. \"\"\" return True if ( not target.is_root and target.parent().author == self.username", "in memory cache \"\"\" try: print(\"Saving memory into cache file...\") with open(file, 'w')", "routines. Parameters ---------- site_name : str, optional Initializes praw under site_name within praw.ini.", "print(\"Bot stopped\") sys.exit() def is_summon_chain(self, target): \"\"\"Checks if parent comment of target is", "if parent comment of target is from self. Used to prevent infinite reply", "subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None", "config.KEYWORD. retrieval_limit : int, optional Maximum number of comments to retrieve at a", "limit=self.retrieval_limit ) for comment in comments: if (comment.author != self.username and comment not", "target : reddit.submission object or reddit.comment object Target Reddit submission or comment. comment", "object Target Reddit comment. 
Returns ------- bool True if parent comment of target", "memory cache \"\"\" try: print(\"Saving memory into cache file...\") with open(file, 'w') as", "raise @staticmethod def read_cache(file): \"\"\"Opens and reads file, converting contents to \\n separated", "except praw.exceptions.APIException as error: print(\"API Error:\", error) raise @staticmethod def read_cache(file): \"\"\"Opens and", "error: print(error) print(\"Unable to retrieve comments.\") raise def submit_comment(self, target, comment): \"\"\"Submit comment", "file : str Location of cache file. Returns ------- collections.deque Contents of cache", "optional Maximum number of authentication attempts before failure. Defaults to -1 (infinite attempts).", "number of authentication attempts before failure. Defaults to -1 (infinite attempts). seconds_between_attempts :", "Parameters ---------- target : reddit.comment object Target Reddit comment. Returns ------- bool True", "------- generator Dict of reddit.Comment and query. \"\"\" try: print(\"Retrieving {} comments...\".format(self.retrieval_limit)) comments", "deque([], CACHE_SIZE) return mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file, converting", "as data: cache = data.read() mem_cache = deque(cache.split('\\n'), CACHE_SIZE) print(\"Cache loaded.\") except FileNotFoundError:", "retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for keyword trigger, and excludes processed comments.", "or reddit.comment object Target Reddit submission or comment. 
comment : str Comment to", "def write_cache(file, mem_cache): \"\"\"Writes list into file, converting list to \\n separated contents.", "Error:\", error) raise except AttributeError as error: print(error) print(\"Unable to retrieve comments.\") raise", "into cache file before exiting.\"\"\" # pylint: disable=unused-argument print(\"\\nStopping bot...\") self.write_cache(CACHE_FILE, self.processed_comments) print(\"Bot", "Defaults to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\" def __init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS,", "str Location of cache file. mem_cache : list or deque Items in memory", "on success. Parameters ---------- max_attempts : int, optional Maximum number of authentication attempts", "Defaults to 60. \"\"\" attempt = 0 while attempt != max_attempts: try: print(\"Authenticating", "print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error)", "keyword : str, optional Comment trigger word. Defaults to config.KEYWORD. retrieval_limit : int,", "self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits = subreddits self.username = site_name self.processed_comments", "of newly created comment. \"\"\" try: if target.author != self.username: print(\"Posting reply...\") return", "Used to prevent multiple replies to the same request. 
Parameters ---------- target :", "__init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)',", "as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error) print(\"Retrying in", "raise except AttributeError as error: print(error) print(\"Unable to retrieve comments.\") raise def submit_comment(self,", "praw under site_name within praw.ini. Defaults to config.SITE_NAME. See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site keyword :", "filters for keyword trigger, and excludes processed comments. Returns ------- generator Dict of", "to retrieve comments from. Defaults to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\" def __init__(self,", "and self.username on success. Parameters ---------- max_attempts : int, optional Maximum number of", "caused by another bot. Parameters ---------- target : reddit.comment object Target Reddit comment.", "into cache file...\") with open(file, 'w') as cache_file: try: cache_file.write(mem_cache.popleft()) for entry in", "retrieve comments from. Defaults to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\" def __init__(self, site_name=SITE_NAME,", "in comments: if (comment.author != self.username and comment not in self.processed_comments #and not", "prevent infinite reply loop caused by another bot. Parameters ---------- target : reddit.comment", "parent comment of target is from self. 
Used to prevent infinite reply loop", "return target.reply(comment) except praw.exceptions.APIException as error: print(\"API Error:\", error) raise @staticmethod def read_cache(file):", "bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None self.retrieval_limit = retrieval_limit", "self.processed_comments.append(comment.id) yield {'comment': comment, 'query' : query.group(1)} except praw.exceptions.APIException as error: print(\"API Error:\",", "target is from bot. False otherwise. \"\"\" return True if ( not target.is_root", "self.reddit = None self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits = subreddits self.username", "print(\"Unable to authenticate:\", error) print(\"Retrying in {} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt += 1", "generator Dict of reddit.Comment and query. \"\"\" try: print(\"Retrieving {} comments...\".format(self.retrieval_limit)) comments =", "Defaults to config.KEYWORD. retrieval_limit : int, optional Maximum number of comments to retrieve", "Common Routines Contains common Reddit bot functions such as keyword comment retrieval, processed", "of cache file, limited to config.CACHE_SIZE \"\"\" try: print(\"Loading cache file into memory...\")", "from subreddits, filters for keyword trigger, and excludes processed comments. Returns ------- generator", "as error: print(\"Unable to authenticate:\", error) print(\"Retrying in {} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt", ": str, optional Subreddits to retrieve comments from. Defaults to config.SUBREDDITS. 
See: https://praw.readthedocs.io/en/latest/code_overview/models", "self.username = site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates", "collections.deque Contents of cache file, limited to config.CACHE_SIZE \"\"\" try: print(\"Loading cache file", "a time. Defaults to config.RETRIEVAL_LIMIT. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits", ") for comment in comments: if (comment.author != self.username and comment not in", "cache file. mem_cache : list or deque Items in memory cache \"\"\" try:", "as keyword comment retrieval, processed comment caching, and comment posting. Allows bot authors", "(infinite attempts). seconds_between_attempts : int, optional Seconds to wait between authentication attempts. Defaults", "cache file before exiting.\"\"\" # pylint: disable=unused-argument print(\"\\nStopping bot...\") self.write_cache(CACHE_FILE, self.processed_comments) print(\"Bot stopped\")", "target : reddit.comment object Target Reddit comment. 
Returns ------- bool True if parent", "**kwargs): \"\"\"Saves self.processed_comments into cache file before exiting.\"\"\" # pylint: disable=unused-argument print(\"\\nStopping bot...\")", "True print(\"Processing comment...\") return False except praw.exceptions.APIException as error: print(\"API Error:\", error) #", "directory created.\") except IOError as error: print(error) print(\"Unable to create cache file\") mem_cache", "\"\"\" try: print(\"Saving memory into cache file...\") with open(file, 'w') as cache_file: try:", "cache file, limited to config.CACHE_SIZE \"\"\" try: print(\"Loading cache file into memory...\") with", "cache file...\") with open(file, 'w') as cache_file: try: cache_file.write(mem_cache.popleft()) for entry in mem_cache:", "for comment in comments: if (comment.author != self.username and comment not in self.processed_comments", "\"\"\"Reddit Bot Common Routines Contains common Reddit bot functions such as keyword comment", "= self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield {'comment': comment, 'query' : query.group(1)} except praw.exceptions.APIException", "to end of file so that we don't get empty # entries in", "*args, **kwargs): \"\"\"Saves self.processed_comments into cache file before exiting.\"\"\" # pylint: disable=unused-argument print(\"\\nStopping", "time import sleep import praw from config import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT,", "and comment not in self.processed_comments #and not self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query", "path = '' for subdirectory in file.split('/')[:-1]: path += subdirectory + '/' mkdir(path)", "except AttributeError as error: print(error) print(\"Unable to retrieve comments.\") raise def submit_comment(self, target,", "raise def submit_comment(self, target, comment): \"\"\"Submit comment to target submission or comment. 
Parameters", "comments.\") raise def submit_comment(self, target, comment): \"\"\"Submit comment to target submission or comment.", "= subreddits self.username = site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1,", "Error:\", error) raise @staticmethod def read_cache(file): \"\"\"Opens and reads file, converting contents to", "\"\"\" attempt = 0 while attempt != max_attempts: try: print(\"Authenticating as {}...\".format(self.site_name)) self.reddit", "# avoid adding \\n to end of file so that we don't get", "== self.username: print(\"Comment already processed.\") return True print(\"Processing comment...\") return False except praw.exceptions.APIException", "https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits to retrieve comments from. Defaults to", "= data.read() mem_cache = deque(cache.split('\\n'), CACHE_SIZE) print(\"Cache loaded.\") except FileNotFoundError: print(\"Cache file not", "into file, converting list to \\n separated contents. Overwrites original cache file. Creates", "KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass for Reddit bots which adds", "replace_more()? target.refresh() for reply in target.replies.list(): if reply.author == self.username: print(\"Comment already processed.\")", "AttributeError as error: print(error) print(\"Unable to retrieve comments.\") raise def submit_comment(self, target, comment):", "cache_file: try: cache_file.write(mem_cache.popleft()) for entry in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n to", "target comment has already been replied by bot. 
Used to prevent multiple replies", "try: cache_file.write(mem_cache.popleft()) for entry in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n to end", "!= self.username and comment not in self.processed_comments #and not self.has_already_replied(comment) #and not self.is_summon_chain(comment)", "Returns ------- collections.deque Contents of cache file, limited to config.CACHE_SIZE \"\"\" try: print(\"Loading", "deque when next loaded print(\"Cache saved\") except IndexError: print(\"No items in cache\") except", "/subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits to retrieve comments from. Defaults to config.SUBREDDITS.", "request. Parameters ---------- target : reddit.comment object Target Reddit comment. Returns ------- bool", "\"\"\"Writes list into file, converting list to \\n separated contents. Overwrites original cache", ": int, optional Seconds to wait between authentication attempts. Defaults to 60. \"\"\"", "to authenticate after {} ' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters", "str, optional Comment trigger word. Defaults to config.KEYWORD. retrieval_limit : int, optional Maximum", "print(\"Loading cache file into memory...\") with open(file, 'r') as data: cache = data.read()", "authentication attempts. Defaults to 60. \"\"\" attempt = 0 while attempt != max_attempts:", "converting list to \\n separated contents. Overwrites original cache file. Creates cache file", "import deque from os import mkdir import re import signal import sys from", "create cache file\") mem_cache = deque([], CACHE_SIZE) return mem_cache @staticmethod def write_cache(file, mem_cache):", "import signal import sys from time import sleep import praw from config import", "contents. Overwrites original cache file. Creates cache file if does not exist. 
Parameters", "self.username = self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable", "config import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass", "CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass for Reddit bots which", "Reddit bots which adds common bot routines. Parameters ---------- site_name : str, optional", "time. Defaults to config.RETRIEVAL_LIMIT. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits to", "' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for keyword trigger, and", "data: cache = data.read() mem_cache = deque(cache.split('\\n'), CACHE_SIZE) print(\"Cache loaded.\") except FileNotFoundError: print(\"Cache", "parent comment of target is from bot. False otherwise. \"\"\" return True if", "after {} ' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for keyword", "to \\n separated list. Creates cache file if does not exist. Parameters ----------", "limited to config.CACHE_SIZE \"\"\" try: print(\"Loading cache file into memory...\") with open(file, 'r')", "comment to target submission or comment. Parameters ---------- target : reddit.submission object or", "if parent comment of target is from bot. False otherwise. \"\"\" return True", ": reddit.comment object Target Reddit comment. Returns ------- bool True if parent comment", "print(\"Authenticating as {}...\".format(self.site_name)) self.reddit = praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username))", "of cache file. 
Returns ------- collections.deque Contents of cache file, limited to config.CACHE_SIZE", "False otherwise. \"\"\" try: # implement replace_more()? target.refresh() for reply in target.replies.list(): if", "site_name self.subreddits = subreddits self.username = site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def", "True if parent comment of target is from bot. False otherwise. \"\"\" return", "self.site_name = site_name self.subreddits = subreddits self.username = site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT,", "import sys from time import sleep import praw from config import ( CACHE_FILE,", "False def has_already_replied(self, target): \"\"\"Checks if target comment has already been replied by", "has_already_replied(self, target): \"\"\"Checks if target comment has already been replied by bot. Used", "str, optional Subreddits to retrieve comments from. Defaults to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit", "subdirectory + '/' mkdir(path) print(\"Cache directory created.\") except IOError as error: print(error) print(\"Unable", "bot functions such as keyword comment retrieval, processed comment caching, and comment posting.", "print(error) print(\"Unable to create cache file\") def bot_exit(self, *args, **kwargs): \"\"\"Saves self.processed_comments into", "optional Comment trigger word. Defaults to config.KEYWORD. retrieval_limit : int, optional Maximum number", "import sleep import praw from config import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME,", "cache_file.write(mem_cache.popleft()) for entry in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n to end of", "bot routines. Parameters ---------- site_name : str, optional Initializes praw under site_name within", "file, converting list to \\n separated contents. Overwrites original cache file. 
Creates cache", "as cache_file: try: cache_file.write(mem_cache.popleft()) for entry in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n", "to create cache file\") def bot_exit(self, *args, **kwargs): \"\"\"Saves self.processed_comments into cache file", "stopped\") sys.exit() def is_summon_chain(self, target): \"\"\"Checks if parent comment of target is from", "Comment trigger word. Defaults to config.KEYWORD. retrieval_limit : int, optional Maximum number of", "if query: self.processed_comments.append(comment.id) yield {'comment': comment, 'query' : query.group(1)} except praw.exceptions.APIException as error:", "self.reddit.subreddit(self.subreddits).comments( limit=self.retrieval_limit ) for comment in comments: if (comment.author != self.username and comment", "and target.parent().author == self.username ) else False def has_already_replied(self, target): \"\"\"Checks if target", "sleep import praw from config import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS,", "SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass for Reddit bots which adds common bot", "\"\"\" try: # implement replace_more()? target.refresh() for reply in target.replies.list(): if reply.author ==", "bots which adds common bot routines. Parameters ---------- site_name : str, optional Initializes", "= self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with Reddit. Sets", "subreddits self.username = site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60):", "comment): \"\"\"Submit comment to target submission or comment. Parameters ---------- target : reddit.submission", "file, limited to config.CACHE_SIZE \"\"\" try: print(\"Loading cache file into memory...\") with open(file,", "attempts before failure. 
Defaults to -1 (infinite attempts). seconds_between_attempts : int, optional Seconds", "converting contents to \\n separated list. Creates cache file if does not exist.", "of target is from bot. False otherwise. \"\"\" return True if ( not", "seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with Reddit. Sets self.reddit and self.username on success. Parameters ----------", "word. Defaults to config.KEYWORD. retrieval_limit : int, optional Maximum number of comments to", "re.I) self.reddit = None self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits = subreddits", "in {} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt += 1 raise RuntimeError('Failed to authenticate after", "contents to \\n separated list. Creates cache file if does not exist. Parameters", "next loaded print(\"Cache saved\") except IndexError: print(\"No items in cache\") except IOError as", "---------- target : reddit.submission object or reddit.comment object Target Reddit submission or comment.", "def is_summon_chain(self, target): \"\"\"Checks if parent comment of target is from self. Used", "self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield {'comment': comment, 'query' : query.group(1)} except praw.exceptions.APIException as", "---------- file : str Location of cache file. mem_cache : list or deque", "and excludes processed comments. Returns ------- generator Dict of reddit.Comment and query. \"\"\"", "items in cache\") except IOError as error: print(error) print(\"Unable to create cache file\")", "cache directory...\") try: path = '' for subdirectory in file.split('/')[:-1]: path += subdirectory", "RedditBot: \"\"\"Superclass for Reddit bots which adds common bot routines. Parameters ---------- site_name", "within praw.ini. Defaults to config.SITE_NAME. 
See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site keyword : str, optional Comment", "= retrieval_limit self.site_name = site_name self.subreddits = subreddits self.username = site_name self.processed_comments =", "reddit.submission object or reddit.comment object Target Reddit submission or comment. comment : str", "reddit.comment of newly created comment. \"\"\" try: if target.author != self.username: print(\"Posting reply...\")", "of file so that we don't get empty # entries in deque when", "bot...\") self.write_cache(CACHE_FILE, self.processed_comments) print(\"Bot stopped\") sys.exit() def is_summon_chain(self, target): \"\"\"Checks if parent comment", "saved\") except IndexError: print(\"No items in cache\") except IOError as error: print(error) print(\"Unable", "print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None self.retrieval_limit =", "object reddit.comment of newly created comment. \"\"\" try: if target.author != self.username: print(\"Posting", "loop caused by another bot. Parameters ---------- target : reddit.comment object Target Reddit", "and reads file, converting contents to \\n separated list. Creates cache file if", "!= self.username: print(\"Posting reply...\") return target.reply(comment) except praw.exceptions.APIException as error: print(\"API Error:\", error)", ": str, optional Initializes praw under site_name within praw.ini. Defaults to config.SITE_NAME. See:", "to retrieve at a time. Defaults to config.RETRIEVAL_LIMIT. 
See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits :", "max_attempts: try: print(\"Authenticating as {}...\".format(self.site_name)) self.reddit = praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully authenticated", "not self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield", "for Reddit bots which adds common bot routines. Parameters ---------- site_name : str,", "self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield {'comment':", "comment of target is from bot. False otherwise. \"\"\" return True if (", "comment...\") return False except praw.exceptions.APIException as error: print(\"API Error:\", error) # Failsafe return", "memory...\") with open(file, 'r') as data: cache = data.read() mem_cache = deque(cache.split('\\n'), CACHE_SIZE)", "self.username ) else False def has_already_replied(self, target): \"\"\"Checks if target comment has already", "error: print(\"API Error:\", error) raise except AttributeError as error: print(error) print(\"Unable to retrieve", "print(\"Cache file not found.\") print(\"Creating cache directory...\") try: path = '' for subdirectory", "!= max_attempts: try: print(\"Authenticating as {}...\".format(self.site_name)) self.reddit = praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully", "for keyword trigger, and excludes processed comments. Returns ------- generator Dict of reddit.Comment", "file.split('/')[:-1]: path += subdirectory + '/' mkdir(path) print(\"Cache directory created.\") except IOError as", "optional Initializes praw under site_name within praw.ini. Defaults to config.SITE_NAME. 
See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site", "retrieval_limit : int, optional Maximum number of comments to retrieve at a time.", "self.processed_comments #and not self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower()) if query:", "---------- max_attempts : int, optional Maximum number of authentication attempts before failure. Defaults", "post. Returns ------- object reddit.comment of newly created comment. \"\"\" try: if target.author", "= self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to", "\\n separated contents. Overwrites original cache file. Creates cache file if does not", "{} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt += 1 raise RuntimeError('Failed to authenticate after {}", "site_name : str, optional Initializes praw under site_name within praw.ini. Defaults to config.SITE_NAME.", "python3 \"\"\"Reddit Bot Common Routines Contains common Reddit bot functions such as keyword", "except IOError as error: print(error) print(\"Unable to create cache file\") def bot_exit(self, *args,", "config.SITE_NAME. See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site keyword : str, optional Comment trigger word. 
Defaults to", "= site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME", "re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None self.retrieval_limit = retrieval_limit self.site_name = site_name", "0 while attempt != max_attempts: try: print(\"Authenticating as {}...\".format(self.site_name)) self.reddit = praw.Reddit(self.site_name) self.username", "attempt = 0 while attempt != max_attempts: try: print(\"Authenticating as {}...\".format(self.site_name)) self.reddit =", "self.username: print(\"Posting reply...\") return target.reply(comment) except praw.exceptions.APIException as error: print(\"API Error:\", error) raise", "mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file, converting list to \\n", "Sets self.reddit and self.username on success. Parameters ---------- max_attempts : int, optional Maximum", "bot. False otherwise. \"\"\" return True if ( not target.is_root and target.parent().author ==", "into memory...\") with open(file, 'r') as data: cache = data.read() mem_cache = deque(cache.split('\\n'),", "= self.reddit.subreddit(self.subreddits).comments( limit=self.retrieval_limit ) for comment in comments: if (comment.author != self.username and", "config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\" def __init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing", ": str, optional Comment trigger word. Defaults to config.KEYWORD. 
retrieval_limit : int, optional", "praw.exceptions.APIException as error: print(\"API Error:\", error) raise @staticmethod def read_cache(file): \"\"\"Opens and reads", "retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit =", "\"\"\" def __init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r'", "print(\"Retrieving {} comments...\".format(self.retrieval_limit)) comments = self.reddit.subreddit(self.subreddits).comments( limit=self.retrieval_limit ) for comment in comments: if", "error) raise except AttributeError as error: print(error) print(\"Unable to retrieve comments.\") raise def", "Creates cache file if does not exist. Parameters ---------- file : str Location", "self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\",", "if target comment has already been replied by bot. Used to prevent multiple", "def read_cache(file): \"\"\"Opens and reads file, converting contents to \\n separated list. Creates", "retrieve at a time. Defaults to config.RETRIEVAL_LIMIT. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str,", "Routines Contains common Reddit bot functions such as keyword comment retrieval, processed comment", "self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with Reddit. Sets self.reddit and self.username", "+= subdirectory + '/' mkdir(path) print(\"Cache directory created.\") except IOError as error: print(error)", "\\w]+)', re.I) self.reddit = None self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits =", "str Comment to post. 
Returns ------- object reddit.comment of newly created comment. \"\"\"", "retrieve comments.\") raise def submit_comment(self, target, comment): \"\"\"Submit comment to target submission or", "print(error) print(\"Unable to create cache file\") mem_cache = deque([], CACHE_SIZE) return mem_cache @staticmethod", "comment. comment : str Comment to post. Returns ------- object reddit.comment of newly", "found.\") print(\"Creating cache directory...\") try: path = '' for subdirectory in file.split('/')[:-1]: path", "([ \\w]+)', re.I) self.reddit = None self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits", "as error: print(error) print(\"Unable to create cache file\") mem_cache = deque([], CACHE_SIZE) return", "config.RETRIEVAL_LIMIT. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#praw.models.Subreddit.comments subreddits : str, optional Subreddits to retrieve comments from.", "cache file if does not exist. Parameters ---------- file : str Location of", "str, optional Initializes praw under site_name within praw.ini. Defaults to config.SITE_NAME. See: https://praw.readthedocs.io/en/latest/getting_started", "been replied by bot. Used to prevent multiple replies to the same request.", "parent comment of target is from bot. False otherwise. \"\"\" try: # implement", "def __init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([", "# entries in deque when next loaded print(\"Cache saved\") except IndexError: print(\"No items", "self.processed_comments into cache file before exiting.\"\"\" # pylint: disable=unused-argument print(\"\\nStopping bot...\") self.write_cache(CACHE_FILE, self.processed_comments)", "attempts). seconds_between_attempts : int, optional Seconds to wait between authentication attempts. 
Defaults to", "( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class RedditBot: \"\"\"Superclass for Reddit", "target.author != self.username: print(\"Posting reply...\") return target.reply(comment) except praw.exceptions.APIException as error: print(\"API Error:\",", "authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error) print(\"Retrying", "reddit.comment object Target Reddit submission or comment. comment : str Comment to post.", "exist. Parameters ---------- file : str Location of cache file. mem_cache : list", "adds common bot routines. Parameters ---------- site_name : str, optional Initializes praw under", "sleep(seconds_between_attempts) attempt += 1 raise RuntimeError('Failed to authenticate after {} ' 'attempts'.format(max_attempts)) def", "not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield {'comment': comment, 'query'", "comment. Parameters ---------- target : reddit.submission object or reddit.comment object Target Reddit submission", "optional Subreddits to retrieve comments from. Defaults to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\"", "): print(\"Initializing bot...\") self.keyword = re.compile(keyword+r' ([ \\w]+)', re.I) self.reddit = None self.retrieval_limit", "for subdirectory in file.split('/')[:-1]: path += subdirectory + '/' mkdir(path) print(\"Cache directory created.\")", "if parent comment of target is from bot. False otherwise. \"\"\" try: #", "which adds common bot routines. 
Parameters ---------- site_name : str, optional Initializes praw", "IndexError: print(\"No items in cache\") except IOError as error: print(error) print(\"Unable to create", "praw.exceptions.APIException as error: print(\"Unable to authenticate:\", error) print(\"Retrying in {} \" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts)", "print(\"Comment already processed.\") return True print(\"Processing comment...\") return False except praw.exceptions.APIException as error:", "trigger, and excludes processed comments. Returns ------- generator Dict of reddit.Comment and query.", "= None self.retrieval_limit = retrieval_limit self.site_name = site_name self.subreddits = subreddits self.username =", "for entry in mem_cache: cache_file.write('\\n'+entry) # avoid adding \\n to end of file", "cache file. Returns ------- collections.deque Contents of cache file, limited to config.CACHE_SIZE \"\"\"", "CACHE_SIZE) return mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file, converting list", "list into file, converting list to \\n separated contents. Overwrites original cache file.", "Bot Common Routines Contains common Reddit bot functions such as keyword comment retrieval,", "target is from bot. False otherwise. \"\"\" try: # implement replace_more()? target.refresh() for", "def has_already_replied(self, target): \"\"\"Checks if target comment has already been replied by bot.", "to config.SUBREDDITS. See: https://praw.readthedocs.io/en/latest/code_overview/models /subreddit.html#subreddit \"\"\" def __init__(self, site_name=SITE_NAME, keyword=KEYWORD, retrieval_limit=RETRIEVAL_LIMIT, subreddits=SUBREDDITS, ):", "seconds_between_attempts : int, optional Seconds to wait between authentication attempts. Defaults to 60.", "{} ' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for keyword trigger,", "before failure. Defaults to -1 (infinite attempts). 
seconds_between_attempts : int, optional Seconds to", "\"\"\"Checks if parent comment of target is from self. Used to prevent infinite", "comment. Returns ------- bool True if parent comment of target is from bot.", "def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for keyword trigger, and excludes processed", "or comment. Parameters ---------- target : reddit.submission object or reddit.comment object Target Reddit", "try: print(\"Loading cache file into memory...\") with open(file, 'r') as data: cache =", "/configuration/prawini.html#choosing-a-site keyword : str, optional Comment trigger word. Defaults to config.KEYWORD. retrieval_limit :", "cache file into memory...\") with open(file, 'r') as data: cache = data.read() mem_cache", "cache file\") def bot_exit(self, *args, **kwargs): \"\"\"Saves self.processed_comments into cache file before exiting.\"\"\"", "praw.ini. Defaults to config.SITE_NAME. See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site keyword : str, optional Comment trigger", "max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with Reddit. Sets self.reddit and self.username on success. Parameters", "file. mem_cache : list or deque Items in memory cache \"\"\" try: print(\"Saving", "): query = self.keyword.search(comment.body.lower()) if query: self.processed_comments.append(comment.id) yield {'comment': comment, 'query' : query.group(1)}", "of target is from self. 
Used to prevent infinite reply loop caused by", "except FileNotFoundError: print(\"Cache file not found.\") print(\"Creating cache directory...\") try: path = ''", "praw from config import ( CACHE_FILE, CACHE_SIZE, KEYWORD, RETRIEVAL_LIMIT, SITE_NAME, SUBREDDITS, ) class", "reply.author == self.username: print(\"Comment already processed.\") return True print(\"Processing comment...\") return False except", "self.reddit = praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException", "target): \"\"\"Checks if parent comment of target is from self. Used to prevent", "otherwise. \"\"\" try: # implement replace_more()? target.refresh() for reply in target.replies.list(): if reply.author", "try: if target.author != self.username: print(\"Posting reply...\") return target.reply(comment) except praw.exceptions.APIException as error:", "another bot. Parameters ---------- target : reddit.comment object Target Reddit comment. Returns -------", "------- bool True if parent comment of target is from bot. False otherwise.", "to 60. \"\"\" attempt = 0 while attempt != max_attempts: try: print(\"Authenticating as", "'r') as data: cache = data.read() mem_cache = deque(cache.split('\\n'), CACHE_SIZE) print(\"Cache loaded.\") except", "authenticate after {} ' 'attempts'.format(max_attempts)) def retrieve_comments(self): \"\"\"Retrieves comments from subreddits, filters for", "comment of target is from bot. False otherwise. \"\"\" try: # implement replace_more()?", "in self.processed_comments #and not self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower()) if", "try: print(\"Retrieving {} comments...\".format(self.retrieval_limit)) comments = self.reddit.subreddit(self.subreddits).comments( limit=self.retrieval_limit ) for comment in comments:", "is from bot. False otherwise. 
\"\"\" try: # implement replace_more()? target.refresh() for reply", "under site_name within praw.ini. Defaults to config.SITE_NAME. See: https://praw.readthedocs.io/en/latest/getting_started /configuration/prawini.html#choosing-a-site keyword : str,", "str Location of cache file. Returns ------- collections.deque Contents of cache file, limited", "site_name self.processed_comments = self.read_cache(CACHE_FILE) signal.signal(signal.SIGINT, self.bot_exit) def authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with", "signal import sys from time import sleep import praw from config import (", "submission or comment. Parameters ---------- target : reddit.submission object or reddit.comment object Target", "'/' mkdir(path) print(\"Cache directory created.\") except IOError as error: print(error) print(\"Unable to create", "not in self.processed_comments #and not self.has_already_replied(comment) #and not self.is_summon_chain(comment) ): query = self.keyword.search(comment.body.lower())", "list. Creates cache file if does not exist. Parameters ---------- file : str", "between authentication attempts. Defaults to 60. \"\"\" attempt = 0 while attempt !=", "subdirectory in file.split('/')[:-1]: path += subdirectory + '/' mkdir(path) print(\"Cache directory created.\") except", "from self. Used to prevent infinite reply loop caused by another bot. Parameters", "\" \"seconds\".format(seconds_between_attempts)) sleep(seconds_between_attempts) attempt += 1 raise RuntimeError('Failed to authenticate after {} '", "Returns ------- generator Dict of reddit.Comment and query. 
\"\"\" try: print(\"Retrieving {} comments...\".format(self.retrieval_limit))", "= praw.Reddit(self.site_name) self.username = self.reddit.user.me() print(\"Successfully authenticated as {}\".format(self.username)) return except praw.exceptions.APIException as", "os import mkdir import re import signal import sys from time import sleep", "@staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file, converting list to \\n separated", "so that we don't get empty # entries in deque when next loaded", "if (comment.author != self.username and comment not in self.processed_comments #and not self.has_already_replied(comment) #and", "Reddit submission or comment. comment : str Comment to post. Returns ------- object", "authenticate(self, max_attempts=-1, seconds_between_attempts=60): \"\"\"Authenticates SITE_NAME with Reddit. Sets self.reddit and self.username on success.", "self.username on success. Parameters ---------- max_attempts : int, optional Maximum number of authentication", "= '' for subdirectory in file.split('/')[:-1]: path += subdirectory + '/' mkdir(path) print(\"Cache", "= deque([], CACHE_SIZE) return mem_cache @staticmethod def write_cache(file, mem_cache): \"\"\"Writes list into file,", "deque from os import mkdir import re import signal import sys from time", "comment posting. Allows bot authors to concentrate on writing their custom bot functions.", ": int, optional Maximum number of comments to retrieve at a time. Defaults", "True if parent comment of target is from bot. False otherwise. \"\"\" try:", "their custom bot functions. \"\"\" from collections import deque from os import mkdir", "int, optional Maximum number of authentication attempts before failure. Defaults to -1 (infinite" ]
[ "import unicode_literals import logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')", "from __future__ import unicode_literals import logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s',", "= logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def set_level(level): root_logger.setLevel(level) get_logger =", "logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def set_level(level): root_logger.setLevel(level)", "root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def set_level(level): root_logger.setLevel(level) get_logger", "unicode_literals import logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def", "import logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def set_level(level):", "logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') def set_level(level): root_logger.setLevel(level) get_logger = root_logger.getChild", "__future__ import unicode_literals import logging root_logger = logging.getLogger('autotweet') logging.basicConfig( format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt='%Y-%m-%d" ]
[ "str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img)", "import os def threshold(train): tu = np.shape(train[0])[0] thresh = 0 for i in", "\".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) # pot", "threshold(train): tu = np.shape(train[0])[0] thresh = 0 for i in range(tu): simul_active =", "rf from snn.spike_train import encode from snn.rl import rl from snn.rl import update", "simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__ ==", "as plt from snn.recep_field import rf from snn.spike_train import encode from snn.rl import", "tu = np.shape(train[0])[0] thresh = 0 for i in range(tu): simul_active = sum(train[:,i])", "rl from snn.rl import update from snn.reconstruct import reconst_weights from snn.parameters import param", "pyplot as plt from snn.recep_field import rf from snn.spike_train import encode from snn.rl", "spiking activity. ######################################################################################################## import numpy as np from snn.neuron import neuron import random", "README ################################################# # This calculates threshold for an image depending upon its spiking", "sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__ == '__main__': #", "random from matplotlib import pyplot as plt from snn.recep_field import rf from snn.spike_train", "= np.shape(train[0])[0] thresh = 0 for i in range(tu): simul_active = sum(train[:,i]) if", "'__main__': # img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\"", "an image depending upon its spiking activity. 
######################################################################################################## import numpy as np from", "from snn.rl import rl from snn.rl import update from snn.reconstruct import reconst_weights from", "i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale", "import encode from snn.rl import rl from snn.rl import update from snn.reconstruct import", "snn.recep_field import rf from snn.spike_train import encode from snn.rl import rl from snn.rl", "# This calculates threshold for an image depending upon its spiking activity. ########################################################################################################", "= np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) # pot = rf(img) #", "simul_active return (thresh/3)*par.scale if __name__ == '__main__': # img = cv2.imread(\"mnist1/\" + str(1)", "# img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" +", "\".png\", 0)) print(img) # pot = rf(img) # train = np.array(encode(pot)) # print", "from snn.recep_field import rf from snn.spike_train import encode from snn.rl import rl from", "snn.reconstruct import reconst_weights from snn.parameters import param as par import os def threshold(train):", "activity. ######################################################################################################## import numpy as np from snn.neuron import neuron import random from", "import pyplot as plt from snn.recep_field import rf from snn.spike_train import encode from", "np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) # pot = rf(img) # train", "its spiking activity. 
######################################################################################################## import numpy as np from snn.neuron import neuron import", "from snn.rl import update from snn.reconstruct import reconst_weights from snn.parameters import param as", "img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) # pot = rf(img)", "thresh = simul_active return (thresh/3)*par.scale if __name__ == '__main__': # img = cv2.imread(\"mnist1/\"", "par import os def threshold(train): tu = np.shape(train[0])[0] thresh = 0 for i", "import update from snn.reconstruct import reconst_weights from snn.parameters import param as par import", "import rf from snn.spike_train import encode from snn.rl import rl from snn.rl import", "snn.neuron import neuron import random from matplotlib import pyplot as plt from snn.recep_field", "simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__ == '__main__': # img =", "image depending upon its spiking activity. ######################################################################################################## import numpy as np from snn.neuron", "snn.rl import update from snn.reconstruct import reconst_weights from snn.parameters import param as par", "This calculates threshold for an image depending upon its spiking activity. ######################################################################################################## import", "for an image depending upon its spiking activity. 
######################################################################################################## import numpy as np", "0 for i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active", "range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__", "if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__ == '__main__': # img", "__name__ == '__main__': # img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img", "################################################# # This calculates threshold for an image depending upon its spiking activity.", "+ str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0))", "import rl from snn.rl import update from snn.reconstruct import reconst_weights from snn.parameters import", "def threshold(train): tu = np.shape(train[0])[0] thresh = 0 for i in range(tu): simul_active", "np.shape(train[0])[0] thresh = 0 for i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh:", "from matplotlib import pyplot as plt from snn.recep_field import rf from snn.spike_train import", "reconst_weights from snn.parameters import param as par import os def threshold(train): tu =", "import random from matplotlib import pyplot as plt from snn.recep_field import rf from", "thresh = 0 for i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh", "depending upon its spiking activity. 
######################################################################################################## import numpy as np from snn.neuron import", "matplotlib import pyplot as plt from snn.recep_field import rf from snn.spike_train import encode", "from snn.parameters import param as par import os def threshold(train): tu = np.shape(train[0])[0]", "= sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if __name__ == '__main__':", "0)) print(img) # pot = rf(img) # train = np.array(encode(pot)) # print threshold(train)", "snn.spike_train import encode from snn.rl import rl from snn.rl import update from snn.reconstruct", "update from snn.reconstruct import reconst_weights from snn.parameters import param as par import os", "= simul_active return (thresh/3)*par.scale if __name__ == '__main__': # img = cv2.imread(\"mnist1/\" +", "from snn.neuron import neuron import random from matplotlib import pyplot as plt from", "cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\",", "+ \".png\", 0)) print(img) # pot = rf(img) # train = np.array(encode(pot)) #", "np from snn.neuron import neuron import random from matplotlib import pyplot as plt", "os def threshold(train): tu = np.shape(train[0])[0] thresh = 0 for i in range(tu):", "param as par import os def threshold(train): tu = np.shape(train[0])[0] thresh = 0", "str(1) + \".png\", 0)) print(img) # pot = rf(img) # train = np.array(encode(pot))", "0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) # pot =", "upon its spiking activity. 
######################################################################################################## import numpy as np from snn.neuron import neuron", "######################################################################################################## import numpy as np from snn.neuron import neuron import random from matplotlib", "(thresh/3)*par.scale if __name__ == '__main__': # img = cv2.imread(\"mnist1/\" + str(1) + \".png\",", "= cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) +", "plt from snn.recep_field import rf from snn.spike_train import encode from snn.rl import rl", "== '__main__': # img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img =", "encode from snn.rl import rl from snn.rl import update from snn.reconstruct import reconst_weights", "snn.parameters import param as par import os def threshold(train): tu = np.shape(train[0])[0] thresh", "in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active return (thresh/3)*par.scale if", "+ str(1) + \".png\", 0)) print(img) # pot = rf(img) # train =", "snn.rl import rl from snn.rl import update from snn.reconstruct import reconst_weights from snn.parameters", "return (thresh/3)*par.scale if __name__ == '__main__': # img = cv2.imread(\"mnist1/\" + str(1) +", "threshold for an image depending upon its spiking activity. 
######################################################################################################## import numpy as", "############################################## README ################################################# # This calculates threshold for an image depending upon its", "import param as par import os def threshold(train): tu = np.shape(train[0])[0] thresh =", "numpy as np from snn.neuron import neuron import random from matplotlib import pyplot", "for i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh = simul_active return", "from snn.reconstruct import reconst_weights from snn.parameters import param as par import os def", "import neuron import random from matplotlib import pyplot as plt from snn.recep_field import", "as par import os def threshold(train): tu = np.shape(train[0])[0] thresh = 0 for", "= 0 for i in range(tu): simul_active = sum(train[:,i]) if simul_active>thresh: thresh =", "as np from snn.neuron import neuron import random from matplotlib import pyplot as", "from snn.spike_train import encode from snn.rl import rl from snn.rl import update from", "neuron import random from matplotlib import pyplot as plt from snn.recep_field import rf", "img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1)", "if __name__ == '__main__': # img = cv2.imread(\"mnist1/\" + str(1) + \".png\", 0)", "+ \".png\", 0) img = np.array(Image.open(\"mnist1/\" + str(1) + \".png\", 0)) print(img) #", "calculates threshold for an image depending upon its spiking activity. ######################################################################################################## import numpy", "import reconst_weights from snn.parameters import param as par import os def threshold(train): tu", "import numpy as np from snn.neuron import neuron import random from matplotlib import" ]
[ "dict(type='SyncBN', requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer',", "16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True,", "act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3],", "= dict(type='SyncBN', requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict(", "2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False, loss_decode=dict(type='LovaszLoss', reduction='none', loss_weight=1.0)), train_cfg=dict(),", "attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0,", "pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False, loss_decode=dict(type='LovaszLoss', reduction='none', loss_weight=1.0)),", "2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3),", "2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False, loss_decode=dict(type='LovaszLoss',", "patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512,", "3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', 
requires_grad=True)), decode_head=dict( type='AttHead',", "3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False, loss_decode=dict(type='LovaszLoss', reduction='none',", "3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False, loss_decode=dict(type='LovaszLoss', reduction='none', loss_weight=1.0)), train_cfg=dict(), test_cfg=dict(mode='whole'))", "model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2,", "dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12,", "dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18,", "requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2,", "type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16,", "1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN',", "backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8,", "32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2,", "= dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4,", 
"requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4,", "2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'),", "mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2),", "out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN',", "pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4,", "use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2,", "in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128,", "patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2,", "embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4,", "norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1,", "window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2,", "depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0,", "2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False,", "= dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, 
embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2,", "qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128,", "256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1,", "drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024],", "2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2,", "<reponame>404479768/Swin-ATT norm_cfg = dict(type='SyncBN', requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder',", "1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True), align_corners=False,", "pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32],", "type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6),", "num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True,", "qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256,", "drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1,", "18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2,", 
"type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128, patch_size=4, window_size=12, mlp_ratio=4, depths=[2, 2, 18, 2],", "2, 18, 2], num_heads=[4, 8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1,", "1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)),", "in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3, norm_cfg=dict(type='SyncBN', requires_grad=True),", "backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=128,", "8, 16, 32], strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None,", "decode_head=dict( type='AttHead', in_channels=[128, 256, 512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3,", "2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2, drop_path_rate=0.2, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', requires_grad=True)), decode_head=dict(", "512, 1024], in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=128, dropout_ratio=0.1, num_classes=3,", "norm_cfg = dict(type='SyncBN', requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None,", "strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0.2, attn_drop_rate=0.2,", "requires_grad=True) backbone_norm_cfg = dict(type='LN', requires_grad=True) model = dict( type='EncoderDecoder', pretrained=None, backbone=dict( type='SwinTransformer', pretrain_img_size=384," ]
[ "authoritys = [] for row in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name", "row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies", "authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority", "Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header", "Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.RunPython(load_initial_data,", "schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f)", "= apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header =", "= Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority =", "#print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies =", "with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header = next(reader) authoritys =", "from datetime import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\",", "Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [", "00:11 from __future__ import unicode_literals from django.db import 
migrations, models import csv from", "apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header = next(reader)", "next(reader) authoritys = [] for row in reader: authority = Authority.objects.create(authority_abbrev = row[0],", "row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete()", "django.db import migrations, models import csv from datetime import datetime def load_initial_data(apps, schema_editor):", "Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ]", "Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.RunPython(load_initial_data, reverse_func), ]", "def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [", "= csv.reader(f) header = next(reader) authoritys = [] for row in reader: authority", "reader = csv.reader(f) header = next(reader) authoritys = [] for row in reader:", "header = next(reader) authoritys = [] for row in reader: authority = Authority.objects.create(authority_abbrev", "schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'),", "for row in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority)", "import migrations, models import csv from datetime import datetime def load_initial_data(apps, schema_editor): Authority", "from __future__ import unicode_literals from django.db import migrations, models import csv from datetime", "= apps.get_model(\"accounts\", 
Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations", "import unicode_literals from django.db import migrations, models import csv from datetime import datetime", "f: reader = csv.reader(f) header = next(reader) authoritys = [] for row in", "authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class", "row in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def", "= row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority)", "= [] for row in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name =", "= row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration):", "2020-11-10 00:11 from __future__ import unicode_literals from django.db import migrations, models import csv", "\"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header = next(reader) authoritys", "__future__ import unicode_literals from django.db import migrations, models import csv from datetime import", "by Django 3.1 on 2020-11-10 00:11 from __future__ import unicode_literals from django.db import", "[] for row in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1])", "in reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def reverse_func(apps,", "Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\",", "def 
load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader", "csv.reader(f) header = next(reader) authoritys = [] for row in reader: authority =", "Generated by Django 3.1 on 2020-11-10 00:11 from __future__ import unicode_literals from django.db", "'r') as f: reader = csv.reader(f) header = next(reader) authoritys = [] for", "datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f:", "import csv from datetime import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\")", "3.1 on 2020-11-10 00:11 from __future__ import unicode_literals from django.db import migrations, models", "migrations, models import csv from datetime import datetime def load_initial_data(apps, schema_editor): Authority =", "load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as f: reader =", "import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r') as", "apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations =", "# Generated by Django 3.1 on 2020-11-10 00:11 from __future__ import unicode_literals from", "Django 3.1 on 2020-11-10 00:11 from __future__ import unicode_literals from django.db import migrations,", "models import csv from datetime import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\",", "reverse_func(apps, schema_editor): Authority = apps.get_model(\"accounts\", Authority) Authority.objects.all().delete() class 
Migration(migrations.Migration): dependencies = [ ('accounts',", "open(\"assets/authority/authority_names.csv\", 'r') as f: reader = csv.reader(f) header = next(reader) authoritys = []", "class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.RunPython(load_initial_data, reverse_func),", "reader: authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1]) #print(authority) def reverse_func(apps, schema_editor):", "unicode_literals from django.db import migrations, models import csv from datetime import datetime def", "from django.db import migrations, models import csv from datetime import datetime def load_initial_data(apps,", "= next(reader) authoritys = [] for row in reader: authority = Authority.objects.create(authority_abbrev =", "on 2020-11-10 00:11 from __future__ import unicode_literals from django.db import migrations, models import", "datetime import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with open(\"assets/authority/authority_names.csv\", 'r')", "csv from datetime import datetime def load_initial_data(apps, schema_editor): Authority = apps.get_model(\"accounts\", \"Authority\") with", "as f: reader = csv.reader(f) header = next(reader) authoritys = [] for row" ]
[ "# recall = recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro') #", "testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i", "precision_score, accuracy_score # load a single file as a numpy array def load_file(filepath):", "i in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict", "+ group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group +", "y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX =", "acceleration filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt',", "y elements def load_dataset(prefix=''): # load all train trainX, trainy = load_dataset_group('train', prefix", "return trainX, trainy, testX, testy # summarize scores def summarize_results(scores): print('scores:', scores) mean,", "load_dataset(prefix=''): # load all train trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR", "a single file as a numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None,", "# load data trainX, trainy, testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx')", "array filenames = list() # total acceleration filenames += ['total_acc_x_' + group +", "# load all test testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/')", "returns train and test X and y elements def load_dataset(prefix=''): # load all", "+ '.txt', 'body_acc_z_' + group + '.txt'] # body gyroscope filenames += ['body_gyro_x_'", "def load_group(filenames, prefix=''): loaded = list() for name in filenames: data = load_file(prefix", "filenames: data = load_file(prefix + name) loaded.append(data) # stack group so that features", "a 3D array of [samples, timesteps, features] def load_group(filenames, 
prefix=''): loaded = list()", "np.dstack(loaded) return loaded # load a dataset group, such as train or test", "# load a single file as a numpy array def load_file(filepath): dataframe =", "np.argmax(y_predict, axis=2) # testy = labels # y_true = np.reshape(testy, [-1]) # y_pred", "group + '.txt', 'body_acc_z_' + group + '.txt'] # body gyroscope filenames +=", "[-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) # precision", "of files into a 3D array of [samples, timesteps, features] def load_group(filenames, prefix=''):", "test testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/') # print(testX.shape, testy.shape)", "= np.dstack(loaded) return loaded # load a dataset group, such as train or", "1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0)", "# y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy = labels", "testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX, testy): y_predict", "such as train or test def load_dataset_group(group, prefix=''): filepath = prefix + group", "name in filenames: data = load_file(prefix + name) loaded.append(data) # stack group so", "+ group + '.txt', 'total_acc_z_' + group + '.txt'] # body acceleration filenames", "output y = load_file(prefix + group + '/y_' + group + '.txt') return", "train data set, k-fold validation import numpy as np import onnxruntime import torch", "y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX,", "that features are the 3rd dimension loaded = np.dstack(loaded) return loaded # load", "# sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) 
print(i.shape)", "single file as a numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True)", "'.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt'] # body", "loaded # load a dataset group, such as train or test def load_dataset_group(group,", "files into a 3D array of [samples, timesteps, features] def load_group(filenames, prefix=''): loaded", "from train data set, k-fold validation import numpy as np import onnxruntime import", "numpy as np import onnxruntime import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils", "from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score,", "def load_dataset_group(group, prefix=''): filepath = prefix + group + '/Inertial Signals/' # load", "2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True,", "trainX, trainy, testX, testy # summarize scores def summarize_results(scores): print('scores:', scores) mean, std", "into a 3D array of [samples, timesteps, features] def load_group(filenames, prefix=''): loaded =", "run an experiment def run_experiment(repeats=10): # load data trainX, trainy, testX, testy =", "# y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy =", "dimension loaded = np.dstack(loaded) return loaded # load a dataset group, such as", "'.txt'] # body acceleration filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' +", "gyroscope filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt',", "+ '.txt', 'total_acc_z_' + group + '.txt'] # body acceleration filenames += ['body_acc_x_'", "onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape)", "prefix + group + 
'/Inertial Signals/' # load all 9 files as a", "the dataset, returns train and test X and y elements def load_dataset(prefix=''): #", "trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy # summarize scores def summarize_results(scores):", "a list of files into a 3D array of [samples, timesteps, features] def", "a numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values #", "= to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy # summarize", "labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict", "list() for name in filenames: data = load_file(prefix + name) loaded.append(data) # stack", "in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict =", "dataframe.values # load a list of files into a 3D array of [samples,", "summarize scores def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return [mean,", "a dataset group, such as train or test def load_dataset_group(group, prefix=''): filepath =", "data set, k-fold validation import numpy as np import onnxruntime import torch from", "'.txt') return X, y # load the dataset, returns train and test X", "group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']", "# y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) # precision =", "+ group + '.txt') return X, y # load the dataset, returns train", "y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro') # print(accuracy, precision, recall, f1score)", "acceleration filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt',", "stack group so that features are the 3rd dimension loaded = 
np.dstack(loaded) return", "# load a list of files into a 3D array of [samples, timesteps,", "y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true,", "'.txt', 'total_acc_z_' + group + '.txt'] # body acceleration filenames += ['body_acc_x_' +", "group so that features are the 3rd dimension loaded = np.dstack(loaded) return loaded", "group + '.txt', 'total_acc_z_' + group + '.txt'] # body acceleration filenames +=", "one hot encode y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape,", "- 1 testy = testy - 1 # one hot encode y trainy", "(0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32,", "sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0,", "np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy = labels # y_true =", "trainX, trainy, testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx')", "# y_predict = np.argmax(y_predict, axis=2) # testy = labels # y_true = np.reshape(testy,", "experiment def run_experiment(repeats=10): # load data trainX, trainy, testX, testy = load_dataset() #", "# accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro') # recall", "= torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features,", "= np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred)", "= to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, 
testX,", "print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)})", "'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load all test testX, testy =", "= np.argmax(y_predict, axis=2) # testy = labels # y_true = np.reshape(testy, [-1]) #", "# total acceleration filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group", "load_file(prefix + group + '/y_' + group + '.txt') return X, y #", "testX.shape, testy.shape) return trainX, trainy, testX, testy # summarize scores def summarize_results(scores): print('scores:',", "recall_score, precision_score, accuracy_score # load a single file as a numpy array def", "loaded.append(data) # stack group so that features are the 3rd dimension loaded =", "print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2,", "+ '.txt', 'body_gyro_z_' + group + '.txt'] # load input data X =", "as train or test def load_dataset_group(group, prefix=''): filepath = prefix + group +", "array of [samples, timesteps, features] def load_group(filenames, prefix=''): loaded = list() for name", "= np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy =", "to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy", "+ group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group +", "group + '.txt'] # load input data X = load_group(filenames, filepath) # load", "load data trainX, trainy, testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess", "dataset, returns train and test X and y elements def load_dataset(prefix=''): # load", "+ '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt'] #", "features.float().numpy()}) 
print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) #", "zero-offset class values trainy = trainy - 1 testy = testy - 1", "test def load_dataset_group(group, prefix=''): filepath = prefix + group + '/Inertial Signals/' #", "in filenames: data = load_file(prefix + name) loaded.append(data) # stack group so that", "return dataframe.values # load a list of files into a 3D array of", "filepath = prefix + group + '/Inertial Signals/' # load all 9 files", "# stack group so that features are the 3rd dimension loaded = np.dstack(loaded)", "train or test def load_dataset_group(group, prefix=''): filepath = prefix + group + '/Inertial", "+ group + '/Inertial Signals/' # load all 9 files as a single", "read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score #", "group + '.txt'] # body acceleration filenames += ['body_acc_x_' + group + '.txt',", "'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt'] # body gyroscope", "load_group(filenames, filepath) # load class output y = load_file(prefix + group + '/y_'", "sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict,", "prefix + 'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class values trainy", "sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32,", "testy = testy - 1 # one hot encode y trainy = to_categorical(trainy)", "= trainy - 1 testy = testy - 1 # one hot encode", "1 testy = testy - 1 # one hot encode y trainy =", "import f1_score, recall_score, precision_score, accuracy_score # load a single file as a numpy", "std] # run an experiment def run_experiment(repeats=10): # load data trainX, trainy, testX,", "features, 
labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) #", "for name in filenames: data = load_file(prefix + name) loaded.append(data) # stack group", "+= ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' +", "3D array of [samples, timesteps, features] def load_group(filenames, prefix=''): loaded = list() for", "# body acceleration filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group", "run_experiment(repeats=10): # load data trainX, trainy, testX, testy = load_dataset() # sess =", "load all 9 files as a single array filenames = list() # total", "as a single array filenames = list() # total acceleration filenames += ['total_acc_x_'", "= precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro') # f1score =", "loaded = list() for name in filenames: data = load_file(prefix + name) loaded.append(data)", "load_group(filenames, prefix=''): loaded = list() for name in filenames: data = load_file(prefix +", "def run_experiment(repeats=10): # load data trainX, trainy, testX, testy = load_dataset() # sess", "# load input data X = load_group(filenames, filepath) # load class output y", "data from train data set, k-fold validation import numpy as np import onnxruntime", "single array filenames = list() # total acceleration filenames += ['total_acc_x_' + group", "- 1 # one hot encode y trainy = to_categorical(trainy) testy = to_categorical(testy)", "sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load a single file as a", "+ group + '.txt', 'body_gyro_z_' + group + '.txt'] # load input data", "y_pred) # precision = precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro')", "load the dataset, returns train and test X and y elements def load_dataset(prefix=''):", "features] def load_group(filenames, prefix=''): loaded = 
list() for name in filenames: data =", "accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro') # recall =", "= sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict =", "an experiment def run_experiment(repeats=10): # load data trainX, trainy, testX, testy = load_dataset()", "= np.mean(scores), np.std(scores) return [mean, std] # run an experiment def run_experiment(repeats=10): #", "all test testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/') # print(testX.shape,", "from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load a single file as", "[mean, std] # run an experiment def run_experiment(repeats=10): # load data trainX, trainy,", "import numpy as np import onnxruntime import torch from pandas import read_csv from", "'.txt'] # body gyroscope filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' +", "class values trainy = trainy - 1 testy = testy - 1 #", "load a list of files into a 3D array of [samples, timesteps, features]", "# zero-offset class values trainy = trainy - 1 testy = testy -", "trainy, testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for", "elements def load_dataset(prefix=''): # load all train trainX, trainy = load_dataset_group('train', prefix +", "['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group", "'.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt'] # body", "+ '/y_' + group + '.txt') return X, y # load the dataset,", "testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy #", "# y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX", 
"load_file(prefix + name) loaded.append(data) # stack group so that features are the 3rd", "= accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true,", "trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) #", "f1_score, recall_score, precision_score, accuracy_score # load a single file as a numpy array", "= torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX, testy): y_predict =", "filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_'", "name) loaded.append(data) # stack group so that features are the 3rd dimension loaded", "= np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred,", "mean, std = np.mean(scores), np.std(scores) return [mean, std] # run an experiment def", "'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class values trainy = trainy", "prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load all test testX,", "train and test X and y elements def load_dataset(prefix=''): # load all train", "def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a list", "filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_'", "features are the 3rd dimension loaded = np.dstack(loaded) return loaded # load a", "group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']", "Dataset/') # print(trainX.shape, trainy.shape) # load all test testX, testy = load_dataset_group('test', prefix", "+ '/Inertial Signals/' # load all 9 files as a single array filenames", "print(testX.shape, testy.shape) # zero-offset class values trainy = trainy - 1 testy =", "# load class output y = load_file(prefix + group + '/y_' + group", "testy): 
y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict) #", "testy - 1 # one hot encode y trainy = to_categorical(trainy) testy =", "for i in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) #", "load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name)", "np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro')", "+ group + '.txt'] # load input data X = load_group(filenames, filepath) #", "'.txt', 'body_gyro_z_' + group + '.txt'] # load input data X = load_group(filenames,", "labels # y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy", "torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score,", "body acceleration filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group +", "recall = recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro') # print(accuracy,", "# print(testX.shape, testy.shape) # zero-offset class values trainy = trainy - 1 testy", "testy.shape) # zero-offset class values trainy = trainy - 1 testy = testy", "average='macro') # f1score = f1_score(y_true, y_pred, average='macro') # print(accuracy, precision, recall, f1score) run_experiment()", "filepath) # load class output y = load_file(prefix + group + '/y_' +", "X, y # load the dataset, returns train and test X and y", "= list() for name in filenames: data = load_file(prefix + name) loaded.append(data) #", "so that features are the 3rd dimension loaded = np.dstack(loaded) return loaded #", "# load all 9 files as a single array filenames = list() #", "['body_acc_x_' + group + '.txt', 'body_acc_y_' 
+ group + '.txt', 'body_acc_z_' + group", "y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict", "return [mean, std] # run an experiment def run_experiment(repeats=10): # load data trainX,", "HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class values trainy = trainy -", "loaded = np.dstack(loaded) return loaded # load a dataset group, such as train", "array def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a", "['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group", "shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX,", "class output y = load_file(prefix + group + '/y_' + group + '.txt')", "to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy # summarize scores", "load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a list of", "+ group + '.txt'] # body acceleration filenames += ['body_acc_x_' + group +", "testX = np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy", "'.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt'] # load", "import onnxruntime import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from", "= np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy = labels # y_true", "hot encode y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape)", "[samples, timesteps, features] def load_group(filenames, prefix=''): loaded = list() for name in filenames:", "test X and y elements 
def load_dataset(prefix=''): # load all train trainX, trainy", "in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict =", "'.txt'] # load input data X = load_group(filenames, filepath) # load class output", "= load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs():", "y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true,", "testX, testy # summarize scores def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores),", "load_dataset_group(group, prefix=''): filepath = prefix + group + '/Inertial Signals/' # load all", "+ name) loaded.append(data) # stack group so that features are the 3rd dimension", "body gyroscope filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group +", "group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']", "def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return [mean, std] #", "+ group + '.txt'] # body gyroscope filenames += ['body_gyro_x_' + group +", "np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy,", "data X = load_group(filenames, filepath) # load class output y = load_file(prefix +", "# all the data from train data set, k-fold validation import numpy as", "np import onnxruntime import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical", "'.txt', 'body_acc_z_' + group + '.txt'] # body gyroscope filenames += ['body_gyro_x_' +", "load a single file as a numpy array def load_file(filepath): dataframe = read_csv(filepath,", "np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) # accuracy = accuracy_score(y_true, y_pred) #", 
"{sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True,", "load all train trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/') #", "num_workers=0) for features, labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict',", "onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for i in", "+ group + '/y_' + group + '.txt') return X, y # load", "load class output y = load_file(prefix + group + '/y_' + group +", "precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true,", "y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred,", "k-fold validation import numpy as np import onnxruntime import torch from pandas import", "'body_gyro_z_' + group + '.txt'] # load input data X = load_group(filenames, filepath)", "sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None,", "+ '.txt'] # body acceleration filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_'", "= sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX,", "and y elements def load_dataset(prefix=''): # load all train trainX, trainy = load_dataset_group('train',", "train trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape)", "encode y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return", "# load all train trainX, trainy = load_dataset_group('train', prefix + 
'UCI HAR Dataset/')", "files as a single array filenames = list() # total acceleration filenames +=", "= read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a list of files into", "i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX =", "group + '/y_' + group + '.txt') return X, y # load the", "+= ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' +", "torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX, testy): y_predict = sess.run(None,", "9 files as a single array filenames = list() # total acceleration filenames", "list() # total acceleration filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' +", "trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load", "prefix=''): loaded = list() for name in filenames: data = load_file(prefix + name)", "trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy,", "testy # summarize scores def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores)", "= onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for i", "Signals/' # load all 9 files as a single array filenames = list()", "# load the dataset, returns train and test X and y elements def", "trainy - 1 testy = testy - 1 # one hot encode y", "+ group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group +", "+ '.txt') return X, y # load the dataset, returns train and test", "X and y elements def load_dataset(prefix=''): # load all train trainX, trainy =", "= testy - 1 # one hot encode y trainy = to_categorical(trainy) testy", "+ group + '.txt', 
'body_acc_z_' + group + '.txt'] # body gyroscope filenames", "# testy = labels # y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict,", "and test X and y elements def load_dataset(prefix=''): # load all train trainX,", "# run an experiment def run_experiment(repeats=10): # load data trainX, trainy, testX, testy", "= load_dataset_group('test', prefix + 'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class", "testy.shape) return trainX, trainy, testX, testy # summarize scores def summarize_results(scores): print('scores:', scores)", "prefix=''): filepath = prefix + group + '/Inertial Signals/' # load all 9", "= load_file(prefix + name) loaded.append(data) # stack group so that features are the", "batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in", "accuracy_score # load a single file as a numpy array def load_file(filepath): dataframe", "'/Inertial Signals/' # load all 9 files as a single array filenames =", "= load_group(filenames, filepath) # load class output y = load_file(prefix + group +", "num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX, testy):", "trainy, testX, testy # summarize scores def summarize_results(scores): print('scores:', scores) mean, std =", "y = load_file(prefix + group + '/y_' + group + '.txt') return X,", "y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy = labels #", "filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_'", "dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a list of files", "import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load a single", "'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt'] # body acceleration", 
"for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX", "set, k-fold validation import numpy as np import onnxruntime import torch from pandas", "accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred,", "summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return [mean, std] # run", "import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import", "Dataset/') # print(testX.shape, testy.shape) # zero-offset class values trainy = trainy - 1", "y_predict) # y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy =", "the 3rd dimension loaded = np.dstack(loaded) return loaded # load a dataset group,", "= list() # total acceleration filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_'", "import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score", "'total_acc_z_' + group + '.txt'] # body acceleration filenames += ['body_acc_x_' + group", "1 # one hot encode y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape,", "scores def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return [mean, std]", "data trainX, trainy, testX, testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess =", "np.std(scores) return [mean, std] # run an experiment def run_experiment(repeats=10): # load data", "= onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name)", "= labels # y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1]) #", 
"tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load a", "sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for i in sess.get_outputs():", "all train trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape,", "batch_size=32, shuffle=True, num_workers=0) for features, labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name:", "recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro') # print(accuracy, precision, recall,", "file as a numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return", "# one hot encode y trainy = to_categorical(trainy) testy = to_categorical(testy) print(trainX.shape, trainy.shape,", "print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return [mean, std] # run an", "print(trainX.shape, trainy.shape) # load all test testX, testy = load_dataset_group('test', prefix + 'UCI", "trainy = trainy - 1 testy = testy - 1 # one hot", "= load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load all", "[-1]) # accuracy = accuracy_score(y_true, y_pred) # precision = precision_score(y_true, y_pred, average='macro') #", "print(trainX.shape, trainy.shape, testX.shape, testy.shape) return trainX, trainy, testX, testy # summarize scores def", "+ '.txt'] # load input data X = load_group(filenames, filepath) # load class", "input data X = load_group(filenames, filepath) # load class output y = load_file(prefix", "# print(trainX.shape, trainy.shape) # load all test testX, testy = load_dataset_group('test', prefix +", "+ '.txt'] # body gyroscope filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_'", "numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None, 
delim_whitespace=True) return dataframe.values # load", "load_dataset_group('train', prefix + 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load all test", "'body_acc_z_' + group + '.txt'] # body gyroscope filenames += ['body_gyro_x_' + group", "load all test testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/') #", "print(i.name) print(i.shape) for i in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name:", "testy = load_dataset() # sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in", "return loaded # load a dataset group, such as train or test def", "delim_whitespace=True) return dataframe.values # load a list of files into a 3D array", "3rd dimension loaded = np.dstack(loaded) return loaded # load a dataset group, such", "= recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro') # print(accuracy, precision,", "header=None, delim_whitespace=True) return dataframe.values # load a list of files into a 3D", "group + '.txt'] # body gyroscope filenames += ['body_gyro_x_' + group + '.txt',", "'/y_' + group + '.txt') return X, y # load the dataset, returns", "in sess.get_outputs(): print(i.name) print(i.shape) # y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX,", "+ 'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class values trainy =", "sess = onnxruntime.InferenceSession('./models/model1.onnx') sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx') for i in sess.get_inputs(): print(i.name) print(i.shape) for", "or test def load_dataset_group(group, prefix=''): filepath = prefix + group + '/Inertial Signals/'", "load_dataset_group('test', prefix + 'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset class values", "print(i.shape) # y_predict = sess.run(None, 
{sess.get_inputs()[0].name: testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1))", "{sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2)", "HAR Dataset/') # print(trainX.shape, trainy.shape) # load all test testX, testy = load_dataset_group('test',", "average='macro') # recall = recall_score(y_true, y_pred, average='macro') # f1score = f1_score(y_true, y_pred, average='macro')", "print('y_predict', y_predict) # y_predict = np.array(y_predict) # y_predict = np.argmax(y_predict, axis=2) # testy", "values trainy = trainy - 1 testy = testy - 1 # one", "'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt'] # load input", "load a dataset group, such as train or test def load_dataset_group(group, prefix=''): filepath", "# load a dataset group, such as train or test def load_dataset_group(group, prefix=''):", "shuffle=True, num_workers=0) for features, labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()})", "pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score,", "dataset group, such as train or test def load_dataset_group(group, prefix=''): filepath = prefix", "load input data X = load_group(filenames, filepath) # load class output y =", "onnxruntime import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics", "data = load_file(prefix + name) loaded.append(data) # stack group so that features are", "group + '.txt', 'body_gyro_z_' + group + '.txt'] # load input data X", "X = load_group(filenames, filepath) # load class output y = load_file(prefix + group", "axis=2) # testy = labels # y_true = np.reshape(testy, [-1]) # y_pred =", "filenames = list() # total acceleration filenames += ['total_acc_x_' + group + 
'.txt',", "return X, y # load the dataset, returns train and test X and", "testX.astype(np.float32)}) testX = np.transpose(testX, (0, 2, 1)) testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0)", "= load_file(prefix + group + '/y_' + group + '.txt') return X, y", "of [samples, timesteps, features] def load_group(filenames, prefix=''): loaded = list() for name in", "as np import onnxruntime import torch from pandas import read_csv from tensorflow.python.keras.utils.np_utils import", "# precision = precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro') #", "torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for features, labels", "to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load a single file", "testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/') # print(testX.shape, testy.shape) # zero-offset", "def load_dataset(prefix=''): # load all train trainX, trainy = load_dataset_group('train', prefix + 'UCI", "all 9 files as a single array filenames = list() # total acceleration", "all the data from train data set, k-fold validation import numpy as np", "y_predict = np.argmax(y_predict, axis=2) # testy = labels # y_true = np.reshape(testy, [-1])", "zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict) # y_predict = np.array(y_predict)", "the data from train data set, k-fold validation import numpy as np import", "+= ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' +", "testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0) testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0) for", "testX, testy = load_dataset_group('test', prefix + 'UCI HAR 
Dataset/') # print(testX.shape, testy.shape) #", "as a numpy array def load_file(filepath): dataframe = read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values", "scores) mean, std = np.mean(scores), np.std(scores) return [mean, std] # run an experiment", "validation import numpy as np import onnxruntime import torch from pandas import read_csv", "group + '.txt') return X, y # load the dataset, returns train and", "+ '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt'] #", "total acceleration filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group +", "y # load the dataset, returns train and test X and y elements", "+ 'UCI HAR Dataset/') # print(trainX.shape, trainy.shape) # load all test testX, testy", "for features, labels in zip(testX, testy): y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()}) print('y_predict', y_predict)", "testy = labels # y_true = np.reshape(testy, [-1]) # y_pred = np.reshape(y_predict, [-1])", "precision = precision_score(y_true, y_pred, average='macro') # recall = recall_score(y_true, y_pred, average='macro') # f1score", "a single array filenames = list() # total acceleration filenames += ['total_acc_x_' +", "# body gyroscope filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group", "trainy.shape) # load all test testX, testy = load_dataset_group('test', prefix + 'UCI HAR", "list of files into a 3D array of [samples, timesteps, features] def load_group(filenames,", "group + '/Inertial Signals/' # load all 9 files as a single array", "group, such as train or test def load_dataset_group(group, prefix=''): filepath = prefix +", "from tensorflow.python.keras.utils.np_utils import to_categorical from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score # load", "timesteps, features] def load_group(filenames, prefix=''): loaded = list() for name in filenames: data", "np.mean(scores), np.std(scores) return [mean, std] # 
run an experiment def run_experiment(repeats=10): # load", "+ '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt'] #", "are the 3rd dimension loaded = np.dstack(loaded) return loaded # load a dataset", "std = np.mean(scores), np.std(scores) return [mean, std] # run an experiment def run_experiment(repeats=10):", "= prefix + group + '/Inertial Signals/' # load all 9 files as", "read_csv(filepath, header=None, delim_whitespace=True) return dataframe.values # load a list of files into a", "# summarize scores def summarize_results(scores): print('scores:', scores) mean, std = np.mean(scores), np.std(scores) return" ]
[ "be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 .", "information if ORCiD ID is empty') return cd def save_person(self): cd = self.cleaned_data", "*args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional',", "self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset", "create an <a href=\"https://orcid.org\">ORCID iD</a> if you do not already have one')) self.fields['academic_title']", "if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset", "proposal', label='Group / lab', required=False) # If adding fields here: see below to", "12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd", "iD is filled in') if self._all_fields_are_optional and not cd['orcid']: for field_str, field in", "name(s)', help_text='Your name is populated from your ORCID record. If you would like", "), ) if self._only_basic_fields: # The Layout always includes all the fields. 
Now", "= PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date'", "always yyyy-mm because the model has this validator (consistent with general mysql date", "self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1 or month_int > 12: raise", "if field_str not in cd or not cd[field_str]: # It needs to be", "purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of the", "if field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any information if", "0000-0002-1825-0097).<br>' 'Please ask head of research unit if unknown', 'first_name': 'Populated from ORCID", "month_int > 12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def", "Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The Layout always includes all", "the fields that don't exist # to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields,", "be in the model and consolidated? 
# TODO: discuss how to replace phones", "'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit if", "person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date'] ==", "self.helper.layout used_help_texts = [] for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required =", "not cd[field_str]: # It needs to be in cd and have a value", "import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid", "= Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row'", "the field has a validator # In the DB it's always yyyy-mm because", "0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you do not already have", "self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from your ORCID", "css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div(", "cd = super().clean() if self.errors: # If there are errors they might be", "import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms", "(e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit if unknown', 'first_name': 'Populated from", "valid email address. You will receive a confirmation email when saving and submitting", "number e.g.: +41222222222 . 
Extension can be added with xNN at the end')", "field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is", "help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial", "phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this", "self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email", "= forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from your ORCID record. If", "are affiliated for the purposes of this proposal', label='Group / lab', required=False) #", "gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In", "= first_name_initial = surname_initial = organisations_initial = group_initial = \\ academic_title_initial = email_initial", "field_str): for item in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item)", "is filled in') if self._all_fields_are_optional and not cd['orcid']: for field_str, field in self.fields.items():", "model and consolidated? 
# TODO: discuss how to replace phones and handling of", "for the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names", "applicable, please enter the date on which you were awarded, or expect to", "might be related to orcid (e.g. using the example # ORCID iD, so", "PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date' not", "a validator # In the DB it's always yyyy-mm because the model has", "field_str not in cd or not cd[field_str]: # It needs to be in", "Layout, Div from django import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import", "are errors they might be related to orcid (e.g. using the example #", "in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and", ". 
Extension can be added with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial,", "= forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>'", "self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items(): if field_str not in cd", "None # It has the correct format mm-yyyy because the field has a", "address will also be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number", "mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please", "not in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return None # It", "if unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title':", "return person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date']", "import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition,", "code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you are", "if self.cleaned_data['phd_date'] == '': return None # It has the correct format mm-yyyy", "are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items(): if field_str", "field_str, field in self.fields.items(): if field_str not in cd or not cd[field_str]: #", "is always saved as yyyy-mm (validator in the model) but it's 
visualized as", "help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper", "phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD',", "email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save()", "django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "will receive a confirmation email when saving and submitting your application form. 
This", "of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of the group(s)", "help_text='Where applicable, please enter the date on which you were awarded, or expect", "# to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email')", "int(month) if month_int < 1 or month_int > 12: raise ValidationError(f'Invalid month: {month}',", "phones and handling of multiple phones phone_contact = person_position.main_phone_model() if phone_contact is None:", "save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None),", "<a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if", "be awarded your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is", "group(s) or laboratories to which you are affiliated for the purposes of this", "label='Group / lab', required=False) # If adding fields here: see below to remove", "for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str in", "(not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') 
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "because the field has a validator # In the DB it's always yyyy-mm", "in: other fields are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field in", "this be in the model and consolidated? # TODO: discuss how to replace", "fields here: see below to remove them from the self.helper.layout used_help_texts = []", "if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial =", "If you would like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>')", "== str and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns", "cleaning: # the user needs to fix the errors in the form before", "css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), )", "css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The Layout", "change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your", "ID is empty') return cd def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'],", "False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts):", "self._only_basic_fields: # The Layout always includes all the fields. 
Now it's better to", "has a validator # In the DB it's always yyyy-mm because the model", "amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] =", "xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please", "forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated", "..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please", "they might be related to orcid (e.g. using the example # ORCID iD,", "'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in container: if type(item)", "can be added with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of", "elif type(item) == str and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches", "self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the database is always saved", "help_text='Your name is populated from your ORCID record. 
If you would like to", "phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your", "self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from your ORCID record.", "format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you are affiliated", "from your ORCID record. If you would like to change it please amend", "'first_name': 'Populated from ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if", "create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if", "do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] =", "cleaning is done. 
return cd # If ORCID iD is filled in: other", "used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name',", "the names of the group(s) or laboratories to which you are affiliated for", "to which you are affiliated for the purposes of this proposal.') self.fields['group'] =", "phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry = cd.get('phone').as_international phone_contact.save()", "Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'),", "email when saving and submitting your application form. This email address will also", "len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag", "and have a value self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in')", "orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial", "when saving and submitting your application form. 
This email address will also be", "and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the person_position", "iD is filled in: other fields are mandatory if self._all_fields_are_optional and cd['orcid']: for", "used_help_texts = [] for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required = False", "return cd def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender',", "group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial", "if 'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return None", "are affiliated for the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type", "laboratories to which you are affiliated for the purposes of this proposal', label='Group", "from the database. \"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: #", "Non-existing PHysicalPerson so it doesn't have any PersonPositions associated return [] person_positions =", "field in self.fields.items(): if field_str not in cd or not cd[field_str]: # It", "surname is populated from your ORCID record. 
If you would like to change", "kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset',", "in self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str in help_texts: self.fields[field_str].help_text =", "'It cannot contain any information if ORCiD ID is empty') return cd def", "if email_contact is None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position", "write a valid email address. You will receive a confirmation email when saving", "None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry = cd.get('phone').as_international", "person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names',", "**get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create", "from django import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError", "cd[field_str]: # It needs to be in cd and have a value self.add_error(field_str,", "email address. 
You will receive a confirmation email when saving and submitting your", "return cd # If ORCID iD is filled in: other fields are mandatory", "gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): #", "in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:',", "), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'),", "label='First name(s)', help_text='Your name is populated from your ORCID record. If you would", "proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of the group(s) or laboratories", "includes all the fields. 
Now it's better to remove the fields that don't", "is None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry =", "contain any information if ORCiD ID is empty') return cd def save_person(self): cd", "**kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial = \\ academic_title_initial", "field_str) elif type(item) == str and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\"", "'Mandatory if ORCID iD is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position", "kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial", "ORCiD iD is filled in') if self._all_fields_are_optional and not cd['orcid']: for field_str, field", "month_int = int(month) if month_int < 1 or month_int > 12: raise ValidationError(f'Invalid", "and submitting your application form. This email address will also be used for", "= forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from your ORCID record. If you", "should this be in the model and consolidated? 
# TODO: discuss how to", "the correct format mm-yyyy because the field has a validator # In the", "PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import", "email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): # Like before, should this be", "or laboratories to which you are affiliated for the purposes of this proposal',", "email address will also be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone", "replace phones and handling of multiple phones phone_contact = person_position.main_phone_model() if phone_contact is", "= self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial =", "css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'),", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in", "(e.g. using the example # ORCID iD, so cd['orcid'] doesn't exist. At this", "type(item) == str and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and", "# Non-existing PHysicalPerson so it doesn't have any PersonPositions associated return [] person_positions", "the database. 
\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing", "don't do further cleaning: # the user needs to fix the errors in", "project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position", "form before further cleaning is done. return cd # If ORCID iD is", "would like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']])", "errors they might be related to orcid (e.g. using the example # ORCID", "phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial =", "which you were awarded, or expect to be awarded your PhD (use the", "label='Date of PhD', help_text='Where applicable, please enter the date on which you were", "def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional", "required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select", "params={'value': month}) return f'{year}-{month}' def clean(self): cd = super().clean() if self.errors: # If", "using the example # ORCID iD, so cd['orcid'] doesn't exist. 
At this point", "self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of the group(s) or laboratories to", "your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid", "Should this be in the model? # TODO: discuss how to replace emails", "Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row'", "before further cleaning is done. return cd # If ORCID iD is filled", "You will receive a confirmation email when saving and submitting your application form.", "initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from", "not in cd or not cd[field_str]: # It needs to be in cd", "email_contact is None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry", "like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial,", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in container: if type(item) ==", "the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] =", "field in self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain", "django import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from", 
"from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput", "exist. At this point we don't do further cleaning: # the user needs", "any information if ORCiD ID is empty') return cd def save_person(self): cd =", "if phone_contact is None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position", "Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item == field_str: container.remove(field_str) def", "== False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order',", "cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any information if ORCiD ID is", "<a href=\"https://orcid.org\">ORCID iD</a> if you do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(),", "year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1 or month_int >", "django.core.validators import RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields import PhoneNumberField from", "mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you", "RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import", "import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH =", "cd.get('phone', None): # Like before, should this be in the model and consolidated?", 
"None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial,", "crispy_forms.layout import Layout, Div from django import forms from django.core.exceptions import ObjectDoesNotExist from", "phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from", "model? # TODO: discuss how to replace emails email_contact = person_position.main_email_model() if email_contact", "help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys()", "surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial", "forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms import", "related to orcid (e.g. 
using the example # ORCID iD, so cd['orcid'] doesn't", "group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return", "the self.helper.layout used_help_texts = [] for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required", "is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields", "not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial,", "import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from", "fields are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items(): if", "how to replace emails email_contact = person_position.main_email_model() if email_contact is None: email_contact =", "this validator (consistent with general mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int", "adding fields here: see below to remove them from the self.helper.layout used_help_texts =", "@staticmethod def _delete_field_from_layout(container, field_str): for item in container: if type(item) == Div: PersonForm._delete_field_from_layout(item,", "one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your", "(consistent with general mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month)", "or month_int > 
12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}'", "field_str, field in self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot", "unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory", "help_text='Please type the names of the group(s) or laboratories to which you are", "None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this be", "# TODO: discuss how to replace emails email_contact = person_position.main_email_model() if email_contact is", "PhD', help_text='Where applicable, please enter the date on which you were awarded, or", "forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email address. You", "purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . Extension can be added", "the errors in the form before further cleaning is done. return cd #", "from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage", "career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if", "a valid email address. You will receive a confirmation email when saving and", "queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email address. 
You will", "Div from django import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator,", "Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry = cd.get('phone').as_international phone_contact.save() return person_position", "for the purposes of this proposal', label='Group / lab', required=False) # If adding", "help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout =", "= self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1 or month_int > 12:", "self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all()", "self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title", "please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender']", "to remove the fields that don't exist # to avoid django-crispy-forms warnings (not", "career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial", "visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson,", "doesn't exist. 
At this point we don't do further cleaning: # the user", "research unit if unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated from ORCID", "iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you do not", "self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name", "'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group')", "an <a href=\"https://orcid.org\">ORCID iD</a> if you do not already have one')) self.fields['academic_title'] =", "_delete_field_from_layout(container, field_str): for item in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif", "Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ),", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod", "phone_initial = 
gender_initial = career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial =", "associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions", "not cd['orcid']: for field_str, field in self.fields.items(): if field_str in cd and cd[field_str]:", "print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout", "css_class='row' ), ) if self._only_basic_fields: # The Layout always includes all the fields.", "None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should", "self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any information", "(use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names']", "in cd or not cd[field_str]: # It needs to be in cd and", "help_text='Please select the organisation(s) to which you are affiliated for the purposes of", "as yyyy-mm (validator in the model) but it's visualized as mm-yyyy phd_date_parts =", "iD</a> if you do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not", "self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() -", "the person_position from the database. 
\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except", "example # ORCID iD, so cd['orcid'] doesn't exist. At this point we don't", "in') if self._all_fields_are_optional and not cd['orcid']: for field_str, field in self.fields.items(): if field_str", "from ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID iD", "is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] =", "to fix the errors in the form before further cleaning is done. return", "from the self.helper.layout used_help_texts = [] for field_str, field in self.fields.items(): if self._all_fields_are_optional:", "this proposal', label='Group / lab', required=False) # If adding fields here: see below", "ORCID iD', 'academic_title': 'Mandatory if ORCID iD is entered'} class PersonForm(Form): def __init__(self,", "yyyy-mm because the model has this validator (consistent with general mysql date format)", "If ORCID iD is filled in: other fields are mandatory if self._all_fields_are_optional and", "like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if", "# If ORCID iD is filled in: other fields are mandatory if self._all_fields_are_optional", "import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD", "organisation(s) to which you are affiliated for the purposes of this proposal.') self.fields['group']", "help_text='Please write a valid email address. 
You will receive a confirmation email when", "kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial", "in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any information if ORCiD ID", "== '': return None # It has the correct format mm-yyyy because the", "be related to orcid (e.g. using the example # ORCID iD, so cd['orcid']", "{}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial =", "This email address will also be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial,", "Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): # Like", "from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>'", "Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from", "e.g.: +41222222222 . 
Extension can be added with xNN at the end') self.fields['phd_date']", "self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in') if self._all_fields_are_optional and not", "= group_initial = \\ academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial", "see below to remove them from the self.helper.layout used_help_texts = [] for field_str,", "self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'),", "!= len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag =", "returns the person_position from the database. \"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] )", "warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date')", "False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial =", "doesn't have any PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'],", "field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help", "organisations_name_autocomplete, get_field_information from ..utils.utils 
import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid':", "= gender_initial = career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid", "remove them from the self.helper.layout used_help_texts = [] for field_str, field in self.fields.items():", "this point we don't do further cleaning: # the user needs to fix", "If there are errors they might be related to orcid (e.g. using the", "it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial,", "person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date' not in", "In the database is always saved as yyyy-mm (validator in the model) but", "ORCiD ID is empty') return cd def save_person(self): cd = self.cleaned_data person_position =", "'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid", "str and item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the", "but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] =", "'Populated from ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID", "the purposes of this proposal', label='Group / lab', required=False) # If adding fields", "return None if self.cleaned_data['phd_date'] == '': return None # It has the correct", "orcid_div, field_set_read_only from .utils 
import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from ..widgets", "already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First", "awarded your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy',", "this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of the group(s) or", "mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int <", "ORCID iD is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position',", "field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the person_position from the database.", "None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial", "self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender", "css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone',", "# TODO: discuss how to replace phones and handling of multiple phones phone_contact", "person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], 
group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if", "= self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial =", "that don't exist # to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "the DB it's always yyyy-mm because the model has this validator (consistent with", "orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial = \\ academic_title_initial =", "if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self)", "= email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial = None if", "in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from your", "False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender',", "f'{year}-{month}' def clean(self): cd = super().clean() if self.errors: # If there are errors", "message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to", "there are errors they might be related to orcid (e.g. 
using the example", "academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial = None", "self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None:", "or not cd[field_str]: # It needs to be in cd and have a", "'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item", "**kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False)", "It has the correct format mm-yyyy because the field has a validator #", "the organisation(s) to which you are affiliated for the purposes of this proposal.')", "= phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial", "Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'),", "it's better to remove the fields that don't exist # to avoid django-crispy-forms", "the model? # TODO: discuss how to replace emails email_contact = person_position.main_email_model() if", "used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . 
Extension", "Extension can be added with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date", "you would like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname']", "import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields", "= forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email address.", "lab', required=False) # If adding fields here: see below to remove them from", "always saved as yyyy-mm (validator in the model) but it's visualized as mm-yyyy", "self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone()", "for field_str, field in self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str, 'It", "from django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms import Form", "self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage", "further cleaning: # the user needs to fix the errors in the form", "try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so it", "None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial =", "of the group(s) or laboratories to which you are affiliated for the 
purposes", "ValidationError from django.forms import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle,", "), Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The Layout always", "point we don't do further cleaning: # the user needs to fix the", "it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from", "is populated from your ORCID record. If you would like to change it", "ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you do", "in self.fields.items(): if field_str not in cd or not cd[field_str]: # It needs", "Like before, should this be in the model and consolidated? # TODO: discuss", "format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1 or", "forms.CharField(initial=group_initial, help_text='Please type the names of the group(s) or laboratories to which you", "to replace phones and handling of multiple phones phone_contact = person_position.main_phone_model() if phone_contact", "= self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date:", "unit if unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated from ORCID iD',", "validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s)", "which you are affiliated for the purposes of this proposal', label='Group / lab',", "= self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], 
cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'),", "yyyy-mm (validator in the model) but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-')", "have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)',", "The Layout always includes all the fields. Now it's better to remove the", "have any PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage']", "ORCID iD is filled in: other fields are mandatory if self._all_fields_are_optional and cd['orcid']:", "saving and submitting your application form. This email address will also be used", "= organisations_initial = group_initial = \\ academic_title_initial = email_initial = phone_initial = gender_initial", "get_person_positions(self): \"\"\" Matches and returns the person_position from the database. \"\"\" try: physical_person", "def _delete_field_from_layout(container, field_str): for item in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str)", "name is populated from your ORCID record. 
If you would like to change", "you were awarded, or expect to be awarded your PhD (use the format", "expect to be awarded your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$',", "DB it's always yyyy-mm because the model has this validator (consistent with general", "= forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] =", "communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . Extension can be", "cd[field_str]: self.add_error(field_str, 'It cannot contain any information if ORCiD ID is empty') return", "them from the self.helper.layout used_help_texts = [] for field_str, field in self.fields.items(): if", "the model has this validator (consistent with general mysql date format) month, year", "if self.person_position.person.phd_date: # In the database is always saved as yyyy-mm (validator in", "with general mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if", "Layout always includes all the fields. 
Now it's better to remove the fields", "self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the database", "def clean(self): cd = super().clean() if self.errors: # If there are errors they", "= super().clean() if self.errors: # If there are errors they might be related", "self._all_fields_are_optional: field.required = False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if", "# It has the correct format mm-yyyy because the field has a validator", "If adding fields here: see below to remove them from the self.helper.layout used_help_texts", "FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'),", "get_field_information from ..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter", "import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask", "person_position from the database. 
\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist:", "(e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you do not already", "email_contact.save() if cd.get('phone', None): # Like before, should this be in the model", "and cd[field_str]: self.add_error(field_str, 'It cannot contain any information if ORCiD ID is empty')", "enter the date on which you were awarded, or expect to be awarded", "organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial", "if month_int < 1 or month_int > 12: raise ValidationError(f'Invalid month: {month}', code='invalid',", "= forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is", "cd and have a value self.add_error(field_str, 'Mandatory field if ORCiD iD is filled", "record. 
If you would like to change it please amend it in <a", "the date on which you were awarded, or expect to be awarded your", "cd or not cd[field_str]: # It needs to be in cd and have", "item in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str", "ask head of research unit if unknown', 'first_name': 'Populated from ORCID iD', 'surname':", "= person_position.main_email_model() if email_contact is None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position", "added with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where", "type the names of the group(s) or laboratories to which you are affiliated", "you would like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'],", "css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'),", "clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return", "be added with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD',", "group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this be in the", "from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div,", "to be awarded your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format", "def get_person_positions(self): \"\"\" Matches and returns the person_position from the database. 
\"\"\" try:", "= CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write", "self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . Extension can be added with", "import Layout, Div from django import forms from django.core.exceptions import ObjectDoesNotExist from django.core.validators", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "a value self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in') if self._all_fields_are_optional", "is None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry =", "field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from ..widgets import", "forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from your ORCID record. 
If you would", "type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item == field_str:", "orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div(", "field if ORCiD iD is filled in') if self._all_fields_are_optional and not cd['orcid']: for", "field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str in help_texts:", "month}) return f'{year}-{month}' def clean(self): cd = super().clean() if self.errors: # If there", "names of the group(s) or laboratories to which you are affiliated for the", "academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial", "1 or month_int > 12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return", "= forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter the date on which", "awarded, or expect to be awarded your PhD (use the format mm-yyyy)', required=False,", "cd['orcid'] doesn't exist. At this point we don't do further cleaning: # the", "PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return", "form. This email address will also be used for communication purposes') self.fields['phone'] =", "the form before further cleaning is done. return cd # If ORCID iD", "# ORCID iD, so cd['orcid'] doesn't exist. At this point we don't do", "populated from your ORCID record. 
If you would like to change it please", "if ORCiD ID is empty') return cd def save_person(self): cd = self.cleaned_data person_position", "cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email',", "receive a confirmation email when saving and submitting your application form. This email", "= self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial =", "in the form before further cleaning is done. return cd # If ORCID", "= phone_initial = gender_initial = career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial", "or expect to be awarded your PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput,", "field in self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str in help_texts: self.fields[field_str].help_text", "= self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial =", "database. 
\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson", "cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'),", "were awarded, or expect to be awarded your PhD (use the format mm-yyyy)',", "to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "> 12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self):", "import FormHelper from crispy_forms.layout import Layout, Div from django import forms from django.core.exceptions", "f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD", "cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None):", "Now it's better to remove the fields that don't exist # to avoid", "in the model? 
# TODO: discuss how to replace emails email_contact = person_position.main_email_model()", "discuss how to replace phones and handling of multiple phones phone_contact = person_position.main_phone_model()", "as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid',", "PHysicalPerson so it doesn't have any PersonPositions associated return [] person_positions = PersonPosition.objects.filter(", "avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone')", "address. You will receive a confirmation email when saving and submitting your application", "/ lab', required=False) # If adding fields here: see below to remove them", "crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Div from django import forms from", "in the model) but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial =", "orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have any PersonPositions", "mm-yyyy because the field has a validator # In the DB it's always", "is done. 
return cd # If ORCID iD is filled in: other fields", "= FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname',", "if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item ==", "# In the database is always saved as yyyy-mm (validator in the model)", "mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID", "len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False", "initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email address. You will receive", "css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The Layout always includes all the", "the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit if unknown',", "if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial)", "TODO: discuss how to replace emails email_contact = person_position.main_email_model() if email_contact is None:", "fix the errors in the form before further cleaning is done. 
return cd", "PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item == field_str: container.remove(field_str) def get_person_positions(self):", "has this validator (consistent with general mysql date format) month, year = self.cleaned_data['phd_date'].split('-')", "label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a", "= cd.get('email') email_contact.save() if cd.get('phone', None): # Like before, should this be in", "iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit if unknown', 'first_name': 'Populated", "Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email',", "fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names')", "'academic_title': 'Mandatory if ORCID iD is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs):", "any PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] )", "career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name", "email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial = None if self.person_position:", "academic_title=cd.get('academic_title'), 
group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this be in", "= person_position.main_phone_model() if phone_contact is None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position", "(validator in the model) but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial", "= False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) !=", "css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div(", "css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'),", "# Should this be in the model? # TODO: discuss how to replace", "if cd.get('email', None): # Should this be in the model? # TODO: discuss", "format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial,", "= [] for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required = False if", "ORCID record. If you would like to change it please amend it in", "self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title',", "= forms.EmailField(initial=email_initial, help_text='Please write a valid email address. 
You will receive a confirmation", "css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row'", "from ORCID iD', 'academic_title': 'Mandatory if ORCID iD is entered'} class PersonForm(Form): def", "has the correct format mm-yyyy because the field has a validator # In", "for field_str, field in self.fields.items(): if field_str not in cd or not cd[field_str]:", "how to replace phones and handling of multiple phones phone_contact = person_position.main_phone_model() if", "required=not self._only_basic_fields) self.fields['first_name'] = forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from your", "Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group',", "in cd and have a value self.add_error(field_str, 'Mandatory field if ORCiD iD is", "purposes of this proposal', label='Group / lab', required=False) # If adding fields here:", "CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils", "self.errors: # If there are errors they might be related to orcid (e.g.", "href=\"https://orcid.org\">ORCID iD</a> if you do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial,", "create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.:", "css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage', css_class='col-8'), Div('phd_date',", 
"self.person_position.main_phone() if self.person_position.person.phd_date: # In the database is always saved as yyyy-mm (validator", "PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils", "= kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts =", "CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a", "Div( Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row'", "month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd = super().clean() if", "), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), ) if", "'': return None # It has the correct format mm-yyyy because the field", "so it doesn't have any PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person,", "css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row'", "below to remove them from the self.helper.layout used_help_texts = [] for field_str, field", "field.required = False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts)", "= self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the 
database is always", "to replace emails email_contact = person_position.main_email_model() if email_contact is None: email_contact = Contact()", "# Like before, should this be in the model and consolidated? # TODO:", "if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused", "if you do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields)", "if self._all_fields_are_optional: field.required = False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str] used_help_texts.append(field_str)", "), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ),", "month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1 or month_int", "raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd =", "in self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any", ".utils import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH", "iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID", "be in the model? 
# TODO: discuss how to replace emails email_contact =", "= Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): #", ") return person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None if", "project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only", "self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: #", "self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return None # It has the", "model) but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid']", "from django.forms import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender,", "be in cd and have a value self.add_error(field_str, 'Mandatory field if ORCiD iD", "# If adding fields here: see below to remove them from the self.helper.layout", "it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False:", "# If there are errors they might be related to orcid (e.g. using", "to orcid (e.g. 
using the example # ORCID iD, so cd['orcid'] doesn't exist.", "mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items(): if field_str not", "None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {})", "in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial)", "HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of", "= f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID", "self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'),", "multiple phones phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact = Contact() phone_contact.method", "would like to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] =", "'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def", "clean(self): cd = super().clean() if self.errors: # If there are errors they might", "Div( Div('organisation_names', 
css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields:", "it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname", "self.add_error(field_str, 'It cannot contain any information if ORCiD ID is empty') return cd", "person_position.main_email_model() if email_contact is None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position =", "import RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models", "iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID iD is entered'}", "'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an", "always includes all the fields. Now it's better to remove the fields that", "handling of multiple phones phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact =", "replace emails email_contact = person_position.main_email_model() if email_contact is None: email_contact = Contact() email_contact.method", "Matches and returns the person_position from the database. \"\"\" try: physical_person = PhysicalPerson.objects.get(", "forms.EmailField(initial=email_initial, help_text='Please write a valid email address. You will receive a confirmation email", "cd['orcid']: for field_str, field in self.fields.items(): if field_str in cd and cd[field_str]: self.add_error(field_str,", "forms.CharField(initial=first_name_initial, label='First name(s)', help_text='Your name is populated from your ORCID record. 
If you", "required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a>", "= self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True,", "a confirmation email when saving and submitting your application form. This email address", "= self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the", "field_str in cd and cd[field_str]: self.add_error(field_str, 'It cannot contain any information if ORCiD", "on which you were awarded, or expect to be awarded your PhD (use", "help_text='Phone number e.g.: +41222222222 . Extension can be added with xNN at the", "is empty') return cd def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'],", "validator (consistent with general mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int =", "forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter the date on which you", "PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete,", "organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this be in the model? 
#", "'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID iD is entered'} class", "if ORCID iD is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position =", "PhD (use the format mm-yyyy)', required=False, widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')])", "super().clean() if self.errors: # If there are errors they might be related to", "forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please", "select the organisation(s) to which you are affiliated for the purposes of this", "self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from your ORCID record. If", "orcid (e.g. using the example # ORCID iD, so cd['orcid'] doesn't exist. At", "self.person_position.person.phd_date: # In the database is always saved as yyyy-mm (validator in the", "help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if", "item == field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the person_position from", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for", "user needs to fix the errors in the form before further cleaning is", "None: email_contact = Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email')", 
"email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): # Like before,", "container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item", "= int(month) if month_int < 1 or month_int > 12: raise ValidationError(f'Invalid month:", "model has this validator (consistent with general mysql date format) month, year =", "if self._all_fields_are_optional and not cd['orcid']: for field_str, field in self.fields.items(): if field_str in", "== field_str: container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the person_position from the", "is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which", "= None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname", "field has a validator # In the DB it's always yyyy-mm because the", "database is always saved as yyyy-mm (validator in the model) but it's visualized", "= False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'),", "= PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have", "kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts',", "saved as yyyy-mm (validator in the model) but it's visualized as mm-yyyy phd_date_parts", "phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the database is 
always saved as", "errors in the form before further cleaning is done. return cd # If", "we don't do further cleaning: # the user needs to fix the errors", "if cd.get('phone', None): # Like before, should this be in the model and", "Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact,", "at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter", "self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts", "href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated from your ORCID record.", "person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): # Like before, should this", "iD, so cd['orcid'] doesn't exist. At this point we don't do further cleaning:", "from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Div from django import forms", "from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils import", "consolidated? # TODO: discuss how to replace phones and handling of multiple phones", "affiliated for the purposes of this proposal', label='Group / lab', required=False) # If", "Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ),", "and returns the person_position from the database. 
\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid']", "Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The Layout always includes", "[] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self):", "PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False)", "'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return None #", "the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the names of", "filled in') if self._all_fields_are_optional and not cd['orcid']: for field_str, field in self.fields.items(): if", "cannot contain any information if ORCiD ID is empty') return cd def save_person(self):", "def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date',", "iD', 'academic_title': 'Mandatory if ORCID iD is entered'} class PersonForm(Form): def __init__(self, *args,", "self.cleaned_data['phd_date'] == '': return None # It has the correct format mm-yyyy because", "head of research unit if unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated", "Gender, PhysicalPerson, PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import", "initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset,", "for item 
in container: if type(item) == Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) ==", "self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset =", "'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str):", "self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter the date on", "to remove them from the self.helper.layout used_help_texts = [] for field_str, field in", "At this point we don't do further cleaning: # the user needs to", "needs to fix the errors in the form before further cleaning is done.", "= career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial =", "{month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd = super().clean() if self.errors:", "Div('career_stage', css_class='col-8'), Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ),", "and cd['orcid']: for field_str, field in self.fields.items(): if field_str not in cd or", "= kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial =", "the fields. 
Now it's better to remove the fields that don't exist #", "# It needs to be in cd and have a value self.add_error(field_str, 'Mandatory", "if self._only_basic_fields: # The Layout always includes all the fields. Now it's better", "filled in: other fields are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field", "you are affiliated for the purposes of this proposal', label='Group / lab', required=False)", "ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields import", "label='Surname(s)', help_text='Your surname is populated from your ORCID record. If you would like", ") except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have any PersonPositions associated", "your application form. This email address will also be used for communication purposes')", "validator # In the DB it's always yyyy-mm because the model has this", "= forms.CharField(initial=group_initial, help_text='Please type the names of the group(s) or laboratories to which", "= self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email() phone_initial =", "general mysql date format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int", "submitting your application form. 
This email address will also be used for communication", "email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None):", "have a value self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in') if", "TODO: discuss how to replace phones and handling of multiple phones phone_contact =", "if self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items(): if field_str not in", "first_name_initial = surname_initial = organisations_initial = group_initial = \\ academic_title_initial = email_initial =", "don't exist # to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage')", "organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you are affiliated for the purposes", "False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name')", "please enter the date on which you were awarded, or expect to be", "physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't", "'Please ask head of research unit if unknown', 'first_name': 'Populated from ORCID iD',", "career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please", "<a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your 
surname is populated from your ORCID", "# The Layout always includes all the fields. Now it's better to remove", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in container: if", "'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in container: if type(item) == Div:", "== Div: PersonForm._delete_field_from_layout(item, field_str) elif type(item) == str and item == field_str: container.remove(field_str)", "< 1 or month_int > 12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month})", "django.core.exceptions import ObjectDoesNotExist from django.core.validators import RegexValidator, ValidationError from django.forms import Form from", "of multiple phones phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact = Contact()", "= PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . 
Extension can be added with xNN", "the user needs to fix the errors in the form before further cleaning", "career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None", "It needs to be in cd and have a value self.add_error(field_str, 'Mandatory field", "Div('phd_date', css_class='col-4'), css_class='row' ), Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names',", "amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is populated", "and handling of multiple phones phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact", "ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd = super().clean()", "= person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone', None): # Like before, should", "widget=XDSoftYearMonthPickerInput, validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy', code='Invalid format')]) self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the", "[] for field_str, field in self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str", "confirmation email when saving and submitting your application form. This email address will", "PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . Extension can be added with xNN at", "for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222 . 
Extension can", "gender_initial = career_stage_initial = phd_date_initial = None if self.person_position: orcid_initial = self.person_position.person.orcid first_name_initial", "fields that don't exist # to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender')", "return None # It has the correct format mm-yyyy because the field has", "\"\"\" Matches and returns the person_position from the database. \"\"\" try: physical_person =", "css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ),", "application form. This email address will also be used for communication purposes') self.fields['phone']", "self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args,", "emails email_contact = person_position.main_email_model() if email_contact is None: email_contact = Contact() email_contact.method =", "phone_contact is None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry", "self.person_position.group academic_title_initial = self.person_position.academic_title career_stage_initial = self.person_position.career_stage gender_initial = self.person_position.person.gender email_initial = self.person_position.main_email()", "and consolidated? 
# TODO: discuss how to replace phones and handling of multiple", "cd def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None),", "needs to be in cd and have a value self.add_error(field_str, 'Mandatory field if", "other fields are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str, field in self.fields.items():", "email_initial = self.person_position.main_email() phone_initial = self.person_position.main_phone() if self.person_position.person.phd_date: # In the database is", "used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts) self.helper =", "PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container,", "the model and consolidated? 
# TODO: discuss how to replace phones and handling", "remove the fields that don't exist # to avoid django-crispy-forms warnings (not fatal)", "\\ academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial =", "to be in cd and have a value self.add_error(field_str, 'Mandatory field if ORCiD", "required=False) # If adding fields here: see below to remove them from the", "self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you are affiliated for", "surname_initial = organisations_initial = group_initial = \\ academic_title_initial = email_initial = phone_initial =", "from ..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the", "group_initial = \\ academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial =", "end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter the date", "value self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in') if self._all_fields_are_optional and", "= {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research", "cd # If ORCID iD is filled in: other fields are mandatory if", "django.forms import Form from phonenumber_field.formfields import PhoneNumberField from project_core.models import PersonTitle, Gender, PhysicalPerson,", "if ORCiD iD is filled in') if self._all_fields_are_optional and not cd['orcid']: for field_str,", "your ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you", "it doesn't have any PersonPositions associated return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'],", "Contact() 
email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if cd.get('phone',", "class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields',", "except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have any PersonPositions associated return", "self.fields.items(): if self._all_fields_are_optional: field.required = False if field_str in help_texts: self.fields[field_str].help_text = help_texts[field_str]", "of PhD', help_text='Where applicable, please enter the date on which you were awarded,", "affiliated for the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please type the", "to which you are affiliated for the purposes of this proposal', label='Group /", "you are affiliated for the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial, help_text='Please", "texts:', help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout(", "False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None)", "'phd_date') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group') @staticmethod def _delete_field_from_layout(container, field_str): for item in container:", "done. 
return cd # If ORCID iD is filled in: other fields are", "cd['orcid']: for field_str, field in self.fields.items(): if field_str not in cd or not", "of this proposal', label='Group / lab', required=False) # If adding fields here: see", "{'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit", "Div('organisation_names', css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: #", "date format) month, year = self.cleaned_data['phd_date'].split('-') month_int = int(month) if month_int < 1", "= help_texts[field_str] used_help_texts.append(field_str) if len(used_help_texts) != len(help_texts): print('Unused help texts:', help_texts.keys() - used_help_texts)", "will also be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.:", "+41222222222 . Extension can be added with xNN at the end') self.fields['phd_date'] =", "Div( Div('email', css_class='col-6'), Div('phone', css_class='col-6'), css_class='row' ), Div( Div('organisation_names', css_class='col-12'), css_class='row' ), Div(", "None): # Like before, should this be in the model and consolidated? 
#", "self._all_fields_are_optional and not cd['orcid']: for field_str, field in self.fields.items(): if field_str in cd", "FormHelper from crispy_forms.layout import Layout, Div from django import forms from django.core.exceptions import", "= self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial =", "self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div( Div('first_name', css_class='col-4'),", "entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields =", "= surname_initial = organisations_initial = group_initial = \\ academic_title_initial = email_initial = phone_initial", "self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter your ORCID iD (e.g.:", "'Please create an <a href=\"https://orcid.org\">ORCID iD</a> if you do not already have one'))", "with xNN at the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable,", "__init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None) self._only_basic_fields = kwargs.pop('only_basic_fields', False) self._all_fields_are_optional =", "date on which you were awarded, or expect to be awarded your PhD", "help_texts.keys() - used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'),", "better to remove the fields that don't exist # to avoid django-crispy-forms warnings", "format mm-yyyy because the field has a validator # In the DB it's", "before, should this be in the model and consolidated? 
# TODO: discuss how", "please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)', help_text='Your surname is", "phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact = Contact() phone_contact.method = Contact.PHONE", "..utils.utils import create_person_position from ..widgets import XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID", "None if self.cleaned_data['phd_date'] == '': return None # It has the correct format", "the database is always saved as yyyy-mm (validator in the model) but it's", "the example # ORCID iD, so cd['orcid'] doesn't exist. At this point we", "you do not already have one')) self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(), initial=academic_title_initial, required=not self._only_basic_fields) self.fields['first_name']", "href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset", "phones phone_contact = person_position.main_phone_model() if phone_contact is None: phone_contact = Contact() phone_contact.method =", "person_position.main_phone_model() if phone_contact is None: phone_contact = Contact() phone_contact.method = Contact.PHONE phone_contact.person_position =", "to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields", "email_contact = person_position.main_email_model() if email_contact is None: email_contact = Contact() email_contact.method = Contact.EMAIL", "your ORCID record. 
If you would like to change it please amend it", "help_text='Your surname is populated from your ORCID record. If you would like to", "= organisations_name_autocomplete(initial=organisations_initial, help_text='Please select the organisation(s) to which you are affiliated for the", "= kwargs.pop('only_basic_fields', False) self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset =", "super().__init__(*args, **kwargs) orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial = \\", "cd.get('email', None): # Should this be in the model? # TODO: discuss how", "self.fields['email'] = forms.EmailField(initial=email_initial, help_text='Please write a valid email address. You will receive a", "= Contact() email_contact.method = Contact.EMAIL email_contact.person_position = person_position email_contact.entry = cd.get('email') email_contact.save() if", "import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information from ..utils.utils import create_person_position from", "from crispy_forms.layout import Layout, Div from django import forms from django.core.exceptions import ObjectDoesNotExist", "= kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs)", "change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields ==", "is filled in: other fields are mandatory if self._all_fields_are_optional and cd['orcid']: for field_str,", "of research unit if unknown', 'first_name': 'Populated from ORCID iD', 'surname': 'Populated from", "self.fields.items(): if field_str not in cd or not cd[field_str]: # It needs to", "and not cd['orcid']: for field_str, field in 
self.fields.items(): if field_str in cd and", "# the user needs to fix the errors in the form before further", "iD is entered'} class PersonForm(Form): def __init__(self, *args, **kwargs): self.person_position = kwargs.pop('person_position', None)", "kwargs.pop('all_fields_are_optional', False) help_texts = kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial", "also be used for communication purposes') self.fields['phone'] = PhoneNumberField(initial=phone_initial, help_text='Phone number e.g.: +41222222222", "ORCID iD', 'surname': 'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID iD is", "'Mandatory field if ORCiD iD is filled in') if self._all_fields_are_optional and not cd['orcid']:", "def clean_phd_date(self): if 'phd_date' not in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '':", "first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group academic_title_initial", "\"\"\" try: physical_person = PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so", "'Populated from ORCID iD', 'academic_title': 'Mandatory if ORCID iD is entered'} class PersonForm(Form):", ") if self._only_basic_fields: # The Layout always includes all the fields. 
Now it's", "= create_person_position(cd['orcid'], cd['first_name'], cd['surname'], gender=cd.get('gender', None), phd_date=cd.get('phd_date', None), academic_title=cd.get('academic_title'), group=cd.get('group'), career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', []))", "self.person_position.person.orcid first_name_initial = self.person_position.person.first_name surname_initial = self.person_position.person.surname organisations_initial = self.person_position.organisation_names.all() group_initial = self.person_position.group", "None): # Should this be in the model? # TODO: discuss how to", "here: see below to remove them from the self.helper.layout used_help_texts = [] for", "PersonPosition, Contact, CareerStage from project_core.utils.orcid import orcid_div, field_set_read_only from .utils import organisations_name_autocomplete, get_field_information", "the end') self.fields['phd_date'] = forms.CharField(initial=phd_date_initial, label='Date of PhD', help_text='Where applicable, please enter the", "In the DB it's always yyyy-mm because the model has this validator (consistent", "= self.person_position.main_phone() if self.person_position.person.phd_date: # In the database is always saved as yyyy-mm", "to change it please amend it in <a href=\"https://orcid.org/login\">ORCID</a>') self.fields['surname'] = forms.CharField(initial=surname_initial, label='Surname(s)',", "career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField( queryset=career_stage_queryset, initial=career_stage_initial) self.fields['email']", "month_int < 1 or month_int > 12: raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value':", "return [] person_positions = PersonPosition.objects.filter( person=physical_person, academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], 
career_stage=self.cleaned_data['career_stage'] ) return person_positions def", "this be in the model? # TODO: discuss how to replace emails email_contact", "academic_title=self.cleaned_data['academic_title'], group=self.cleaned_data['group'], career_stage=self.cleaned_data['career_stage'] ) return person_positions def clean_phd_date(self): if 'phd_date' not in self.cleaned_data:", "do further cleaning: # the user needs to fix the errors in the", "the group(s) or laboratories to which you are affiliated for the purposes of", "from django.core.validators import RegexValidator, ValidationError from django.forms import Form from phonenumber_field.formfields import PhoneNumberField", "organisations_initial = group_initial = \\ academic_title_initial = email_initial = phone_initial = gender_initial =", "# In the DB it's always yyyy-mm because the model has this validator", "= kwargs.pop('help_texts', {}) career_stage_queryset = kwargs.pop('career_stages_queryset', None) super().__init__(*args, **kwargs) orcid_initial = first_name_initial =", "all the fields. Now it's better to remove the fields that don't exist", "css_class='col-12'), css_class='row' ), Div( Div('group', css_class='col-12'), css_class='row' ), ) if self._only_basic_fields: # The", "[])) if cd.get('email', None): # Should this be in the model? 
# TODO:", "the model) but it's visualized as mm-yyyy phd_date_parts = self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}'", "which you are affiliated for the purposes of this proposal.') self.fields['group'] = forms.CharField(initial=group_initial,", "return f'{year}-{month}' def clean(self): cd = super().clean() if self.errors: # If there are", "it's always yyyy-mm because the model has this validator (consistent with general mysql", "code='invalid', params={'value': month}) return f'{year}-{month}' def clean(self): cd = super().clean() if self.errors: #", "discuss how to replace emails email_contact = person_position.main_email_model() if email_contact is None: email_contact", "ORCID iD, so cd['orcid'] doesn't exist. At this point we don't do further", "ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head of research unit if unknown', 'first_name':", "Div( Div('first_name', css_class='col-4'), Div('surname', css_class='col-4'), Div('academic_title', css_class='col-2'), Div('gender', css_class='col-2'), css_class='row' ), Div( Div('career_stage',", "self.person_position.person.phd_date.split('-') phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}' self.fields['orcid'] = forms.CharField(initial=orcid_initial, **get_field_information(PhysicalPerson, 'orcid', label='ORCID iD', required=True, help_text='Enter", "container.remove(field_str) def get_person_positions(self): \"\"\" Matches and returns the person_position from the database. \"\"\"", "ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have any PersonPositions associated return []", "in self.cleaned_data: return None if self.cleaned_data['phd_date'] == '': return None # It has", "so cd['orcid'] doesn't exist. At this point we don't do further cleaning: #", "career_stage=cd.get('career_stage'), organisation_names=cd.get('organisation_names', [])) if cd.get('email', None): # Should this be in the model?", "fields. 
Now it's better to remove the fields that don't exist # to", "PhysicalPerson.objects.get( orcid=self.cleaned_data['orcid'] ) except ObjectDoesNotExist: # Non-existing PHysicalPerson so it doesn't have any", "because the model has this validator (consistent with general mysql date format) month,", "further cleaning is done. return cd # If ORCID iD is filled in:", "= Contact() phone_contact.method = Contact.PHONE phone_contact.person_position = person_position phone_contact.entry = cd.get('phone').as_international phone_contact.save() return", "- used_help_texts) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( orcid_div('orcid'), Div(", "exist # to avoid django-crispy-forms warnings (not fatal) PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender') PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage') PersonForm._delete_field_from_layout(self.helper.layout.fields,", "if self.errors: # If there are errors they might be related to orcid", "it in <a href=\"https://orcid.org/login\">ORCID</a>') field_set_read_only([self.fields['first_name'], self.fields['surname']]) if self._only_basic_fields == False: self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(),", "cd.get('email') email_contact.save() if cd.get('phone', None): # Like before, should this be in the", "in the model and consolidated? 
# TODO: discuss how to replace phones and", "correct format mm-yyyy because the field has a validator # In the DB", "self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage']", "forms.ModelChoiceField(queryset=Gender.objects.all(), initial=gender_initial) if career_stage_queryset is None: career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name') self.fields['career_stage'] = forms.ModelChoiceField(", "= \\ academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial", "empty') return cd def save_person(self): cd = self.cleaned_data person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'],", "XDSoftYearMonthPickerInput HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>' 'Please ask head" ]
[ "with some special types of parameters. - begin: bool Whether it's the first", "= v: \\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x, ind =", "param = None): ''' After parameter changes due to importing or change of", "ind = k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1)", "twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v == \"intl\" or", "x, ind = k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1)", "pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True elif v == \"int\"", "Build the boxes. Parameters ---------- paramTyp: dictionary Defining types of parameters in the", "None: for i, (k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan", "= [] elif typ == \"protocol\": self.param[ind] = val elif typ == \"bool\":", "= '' le = self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val): ds", "ind = k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le)", "self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\" or v", "val.split(',')] else: self.param[ind] = [] elif typ == \"protocol\": self.param[ind] = val elif", "\\ max(map(abs, val)) < 1e3): ds = \", \".join(map(str, val)) else: ds =", "import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as", "= QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ = v:", "out of the input widget with the value. 
**kargs: Arguments come with some", "1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None):", "= list(map(int, val.split(','))) else: self.param[ind] = [] elif typ == \"floatl\": if len(val):", "or v == \"floatr\": le0, le1 = self.senderList[i] if v == \"intr\" or", "cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0)", "== \"int\" or (1e-3 < abs(val) and abs(val) < 1e3): ds = str(val)", "'' le = self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val): ds =", "self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0)", "---------- param: dictionary, optional New parameters. Default is None, only tend to update", "else: self.param[ind] = [] elif typ == \"protocol\": self.param[ind] = val elif typ", "with a bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout,", "else: ds = \", \".join([\"{:.3e}\".format(d) for d in val]) else: ds = ''", "else: ds = '' le = self.senderList[i] le.setText(ds) elif v == \"bool\": cb", "self.param[k] if v == \"protocol\" and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda", "self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get parameters managed in this widget.", "to update protocols. 
''' if param == None: for i, (k, v) in", "= \", \".join(val) else: ds = '' le = self.senderList[i] le.setText(ds) elif v", "options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ,", "or v == \"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind", "self.err = True elif v == \"int\" or v == \"float\": if v", "for d in val.split(',')] else: self.param[ind] = [] elif typ == \"protocol\": self.param[ind]", "self.err = False self.param = param self.paramTyp = paramTyp self.projMan = projMan self.senderList", "x, ind = k, typ = v: \\ self.updateParam(ind, typ, x, begin =", "pt = self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err", "i, 1) self.senderList.append(cb) elif v == \"int\" or v == \"float\": le =", "if len(val): ds = \", \".join(val) else: ds = '' le = self.senderList[i]", "\"float\": self.param[ind] = float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val)", "= v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\"", "if v == \"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds", "v == \"floatr\": le0, le1 = self.senderList[i] if v == \"intr\" or (1e-3", "enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\" and self.projMan != None: cb", "bool(val) elif \"combo\" in typ: self.param[ind] = val else: print(\"Unknown parameter type\") except", "error in the parameters. 
senderList: ''' super().__init__(parent) self.err = False self.param = param", "self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param", "= \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v == \"intr\" or v ==", "as pd class ParamWidget(QGridLayout): ''' Collecting all the input boxes and labels to", "[] elif typ == \"protocol\": self.param[ind] = val elif typ == \"bool\": self.param[ind]", "in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if v == \"protocol\" and", "typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:]", "elif v == \"intl\" or v == \"floatl\": if len(val): if v ==", "it's the first one of the two value range parameters. ''' try: self.err", "\".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d in val]) else: ds", "is None, only tend to update protocols. ''' if param == None: for", "= QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v ==", "\", \".join([\"{:.3e}\".format(d) for d in val]) else: ds = '' le = self.senderList[i]", "''' Update individual parameters in profile using values get from input widgets. 
Parameters", "val)) and \\ max(map(abs, val)) < 1e3): ds = \", \".join(map(str, val)) else:", "self.updateDisp(param) def updateDisp(self, param = None): ''' After parameter changes due to importing", "QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ =", "val)) < 1e3): ds = \", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d)", "== \"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1])", "\"floatr\": le0, le1 = self.senderList[i] if v == \"intr\" or (1e-3 < abs(val[0])", "else: self.err = True elif v == \"int\" or v == \"float\": if", "None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in pt: cb.addItem(j)", "= \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3 < abs(val[1]) and abs(val[1])", "val.split(','))) else: self.param[ind] = [] elif typ == \"floatl\": if len(val): self.param[ind] =", "val = param[k] if v == \"protocol\" and self.projMan != None: cb =", "\\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\")", "parameters in the set read from paramMan. projMan: Project Project management class, used", "self.addWidget(QLabel(k), i, 0) val = self.param[k] if v == \"protocol\" and projMan !=", "QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x))", "ind: string Key of the individual parameter to be set. typ: string Type", "the individual parameter. 
val: string Text out of the input widget with the", "type\") print(v, val) self.update() def updateParam(self, ind, typ, val, **kargs): ''' Update individual", "QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x))", "= paramTyp self.projMan = projMan self.senderList = [] for i, (k, v) in", "abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds)", "or v == \"floatl\" or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x,", "self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp()", "in val.split(',')] else: self.param[ind] = [] elif typ == \"protocol\": self.param[ind] = val", "i, 1) self.senderList.append(le) elif v == \"intr\" or v == \"floatr\": le0 =", "(k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan != None: cb", "set. typ: string Type of the individual parameter. val: string Text out of", "one of the two value range parameters. ''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\")", "ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v == \"intr\" or v", "self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): '''", "self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:] cb =", "profile using values get from input widgets. Parameters ---------- ind: string Key of", "ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v == \"floatl\": if", "parameters. senderList: ''' super().__init__(parent) self.err = False self.param = param self.paramTyp = paramTyp", "== \"floatl\": if len(val): if v == \"intl\" or (1e-3 < min(map(abs, val))", "paramTyp, param, projMan = None, parent = None): ''' Build the boxes. 
Parameters", "\"combo\" in v: options = v.split(',')[1:] cb = QComboBox() for j in options:", "tend to update protocols. ''' if param == None: for i, (k, v)", "typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v", "self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in v: options =", "None, parent = None): ''' Build the boxes. Parameters ---------- paramTyp: dictionary Defining", "v == \"protocol\" and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind", "in enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\" and self.projMan != None:", "kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif typ == \"intl\": if", "= QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ,", "print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self, ind, typ, val, **kargs): '''", "\"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: \\", "parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): ''' After parameter changes", "bool Whether it's the first one of the two value range parameters. '''", "parameter type\") print(v, val) self.update() def updateParam(self, ind, typ, val, **kargs): ''' Update", "QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ = v: \\", "the first one of the two value range parameters. 
''' try: self.err =", "int(val) elif typ == \"float\": self.param[ind] = float(val) elif typ == \"intr\": if", "min(map(abs, val)) and \\ max(map(abs, val)) < 1e3): ds = \", \".join(map(str, val))", "!= None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ = v:", "lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda", "1e3): ds = \", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d", "self.senderList[i] le.setText(ds) elif v == \"intr\" or v == \"floatr\": le0, le1 =", "== \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v:", "string Key of the individual parameter to be set. typ: string Type of", "= \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v == \"floatl\": if len(val):", "---------- paramTyp: dictionary Defining types of parameters in the set. param: dictionary The", "parameters managed in this widget. ''' if not self.err: return self.param else: return", "of the individual parameter to be set. typ: string Type of the individual", "= v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown", "dictionary Defining types of parameters in the set. 
param: dictionary The parameters in", "''' After parameter changes due to importing or change of protocols, update display", "val]) else: ds = '' le = self.senderList[i] le.setText(ds) elif v == \"strl\":", "= True else: self.param = param for i, (k, v) in enumerate(self.paramTyp.items()): val", "twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v == \"intl\" or v ==", "param self.paramTyp = paramTyp self.projMan = projMan self.senderList = [] for i, (k,", "typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif", "== \"int\": self.param[ind] = int(val) elif typ == \"float\": self.param[ind] = float(val) elif", "in the set. param: dictionary The parameters in the set read from paramMan.", "typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\" or v ==", "self.param = param for i, (k, v) in enumerate(self.paramTyp.items()): val = param[k] if", "cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True elif v == \"int\" or", "= v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v ==", "self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind] = val else: print(\"Unknown parameter", "and labels to assign data. ''' def __init__(self, paramTyp, param, projMan = None,", "in the parameters. senderList: ''' super().__init__(parent) self.err = False self.param = param self.paramTyp", "== \"floatl\" or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind =", "= None, parent = None): ''' Build the boxes. Parameters ---------- paramTyp: dictionary", "= val elif typ == \"bool\": self.param[ind] = bool(val) elif \"combo\" in typ:", "values get from input widgets. 
Parameters ---------- ind: string Key of the individual", "try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val) elif", "1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\"", "Project management class, used for access raw data. Attributes ---------- param: dictionary Parameter", "= None): ''' After parameter changes due to importing or change of protocols,", "\"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0]) else:", "(k, v) in enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\" and self.projMan", "== \"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = [] elif", "numpy as np import pandas as pd class ParamWidget(QGridLayout): ''' Collecting all the", "used for access raw data. Attributes ---------- param: dictionary Parameter set managed by", "if len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param = param for i,", "from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import", "parameters. Default is None, only tend to update protocols. ''' if param ==", "int(val) else: self.param[ind][1] = int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] =", "self.err = True else: self.param = param for i, (k, v) in enumerate(self.paramTyp.items()):", "np import pandas as pd class ParamWidget(QGridLayout): ''' Collecting all the input boxes", "Update individual parameters in profile using values get from input widgets. Parameters ----------", "the two value range parameters. ''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ", "elif typ == \"float\": self.param[ind] = float(val) elif typ == \"intr\": if kargs[\"begin\"]:", "from paramMan. projMan: Project Project management class, used for access raw data. Attributes", "range parameters. 
''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind]", "or v == \"floatl\": if len(val): if v == \"intl\" or (1e-3 <", "for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if", "v == \"intl\" or v == \"floatl\" or v == \"strl\": le =", "from a GridLayout with a bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout,", "only tend to update protocols. ''' if param == None: for i, (k,", "== \"intr\" or v == \"floatr\": le0, le1 = self.senderList[i] if v ==", "read from paramMan. projMan: Project Project management class, used for access raw data.", "typ == \"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = []", "= self.senderList[i] if v == \"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) <", "enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if v == \"protocol\" and projMan", "self.param = param self.paramTyp = paramTyp self.projMan = projMan self.senderList = [] for", "paramTyp self.projMan = projMan self.senderList = [] for i, (k, v) in enumerate(paramTyp.items()):", "True else: self.param = param for i, (k, v) in enumerate(self.paramTyp.items()): val =", "float(val) else: self.param[ind][1] = float(val) elif typ == \"intl\": if len(val): self.param[ind] =", "value range parameters. ''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\":", "this grid widget. err: bool Whether there's an error in the parameters. 
senderList:", "= QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ", "< 1e3): ds = str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds)", "= k, typ = v: \\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda", "typ == \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = []", "if v == \"int\" or (1e-3 < abs(val) and abs(val) < 1e3): ds", "self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val) elif typ", "x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def", "typ: string Type of the individual parameter. val: string Text out of the", "self.senderList.append(le) elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k,", "if v == \"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds", "self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val) elif typ == \"float\": self.param[ind]", "self.param[ind] = [] elif typ == \"strl\": if len(val): self.param[ind] = [d.strip() for", "of parameters in the set. 
param: dictionary The parameters in the set read", "= k, typ = v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB", "abs(val) < 1e3): ds = str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i]", "for d in val]) else: ds = '' le = self.senderList[i] le.setText(ds) elif", "if len(val): self.param[ind] = [d.strip() for d in val.split(',')] else: self.param[ind] = []", "v: \\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x, ind = k,", "v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ =", "i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if v", "len(val): self.param[ind] = [d.strip() for d in val.split(',')] else: self.param[ind] = [] elif", "cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind,", "== \"bool\": self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind] = val else:", "= float(val) elif typ == \"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else:", "self.senderList = [] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val", "\"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val)", "i, 1) self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:] cb = QComboBox()", "= param self.paramTyp = paramTyp self.projMan = projMan self.senderList = [] for i,", "\"intr\" or v == \"floatr\": le0, le1 = self.senderList[i] if v == \"intr\"", "abs(val) and abs(val) < 1e3): ds = str(val) else: ds = \"{:.3e}\".format(val) le", "QComboBox, QPushButton, QCheckBox import numpy as np import pandas as pd class ParamWidget(QGridLayout):", "The parameters in the set read from paramMan. projMan: Project Project management class,", "err: bool Whether there's an error in the parameters. 
senderList: ''' super().__init__(parent) self.err", "QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v == \"intl\"", "v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan != None: cb =", "< abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0])", "import pandas as pd class ParamWidget(QGridLayout): ''' Collecting all the input boxes and", "v == \"int\" or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind", "''' Get parameters managed in this widget. ''' if not self.err: return self.param", "in the set read from paramMan. projMan: Project Project management class, used for", "1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\"", "== \"intl\" or (1e-3 < min(map(abs, val)) and \\ max(map(abs, val)) < 1e3):", "if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = [] elif typ ==", "def updateDisp(self, param = None): ''' After parameter changes due to importing or", "the set. param: dictionary The parameters in the set read from paramMan. projMan:", "v == \"intl\" or (1e-3 < min(map(abs, val)) and \\ max(map(abs, val)) <", "i, 1) self.senderList.append(le) elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind", "set read from paramMan. projMan: Project Project management class, used for access raw", "Collecting all the input boxes and labels to assign data. ''' def __init__(self,", "len(pt): cb.setCurrentIndex(0) else: self.err = True elif v == \"int\" or v ==", "= v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le)", "widgets. 
Parameters ---------- ind: string Key of the individual parameter to be set.", "elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v,", "for access raw data. Attributes ---------- param: dictionary Parameter set managed by this", "\"combo\" in typ: self.param[ind] = val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\")", "v == \"strl\": if len(val): ds = \", \".join(val) else: ds = ''", "of the two value range parameters. ''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if", "1) self.senderList.append(le) elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind =", "= False self.param = param self.paramTyp = paramTyp self.projMan = projMan self.senderList =", "= self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err =", "else: self.param[ind] = [] elif typ == \"floatl\": if len(val): self.param[ind] = list(map(float,", "== \"strl\": if len(val): ds = \", \".join(val) else: ds = '' le", "\"protocol\": self.param[ind] = val elif typ == \"bool\": self.param[ind] = bool(val) elif \"combo\"", "derived from a GridLayout with a bunch of widgets from PyQt5.QtWidgets import QLabel,", "of the individual parameter. val: string Text out of the input widget with", "\"intl\" or v == \"floatl\": if len(val): if v == \"intl\" or (1e-3", "begin: bool Whether it's the first one of the two value range parameters.", "elif typ == \"protocol\": self.param[ind] = val elif typ == \"bool\": self.param[ind] =", "by this grid widget. err: bool Whether there's an error in the parameters.", "the parameters. 
senderList: ''' super().__init__(parent) self.err = False self.param = param self.paramTyp =", "lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\":", "le1.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x, begin =", "ind, typ, val, **kargs): ''' Update individual parameters in profile using values get", "paramTyp: dictionary Defining types of parameters in the set. param: dictionary The parameters", "access raw data. Attributes ---------- param: dictionary Parameter set managed by this grid", "= k, typ = v: self.updateParam(ind, typ, x, begin = False)) twoHB =", "= False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val) elif typ ==", "assign data. ''' def __init__(self, paramTyp, param, projMan = None, parent = None):", "self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i,", "or v == \"float\": if v == \"int\" or (1e-3 < abs(val) and", "== \"float\": self.param[ind] = float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] =", "in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind,", "projMan = None, parent = None): ''' Build the boxes. Parameters ---------- paramTyp:", "a bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout,", "self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self, ind, typ,", "Whether it's the first one of the two value range parameters. 
''' try:", "\"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = [] elif typ", "cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self,", "v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb =", "QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le,", "self.param[ind] = val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True", "v.split(',')[1:] cb = QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind =", "val, **kargs): ''' Update individual parameters in profile using values get from input", "in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True elif v ==", "= self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val): ds = \", \".join(val)", "== \"int\" or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind =", "typ = v: self.updateParam(ind, typ, x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0)", "begin = True)) le1.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ,", "\"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ", "else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): '''", "or change of protocols, update display of parameters. Parameters ---------- param: dictionary, optional", "\"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v == \"intr\" or v == \"floatr\":", "None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\", "parameters in the set. param: dictionary The parameters in the set read from", "individual parameter. 
val: string Text out of the input widget with the value.", "input boxes and labels to assign data. ''' def __init__(self, paramTyp, param, projMan", "v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\" or", "cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self,", "in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update()", "be set. typ: string Type of the individual parameter. val: string Text out", "(1e-3 < abs(val) and abs(val) < 1e3): ds = str(val) else: ds =", "class ParamWidget(QGridLayout): ''' Collecting all the input boxes and labels to assign data.", "typ = v: \\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x, ind", "cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb,", "= QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ,", "i, 1) self.senderList.append([le0, le1]) elif v == \"intl\" or v == \"floatl\" or", "print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): ''' After parameter", "typ, x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i,", "\".join(val) else: ds = '' le = self.senderList[i] le.setText(ds) elif v == \"bool\":", "parameters. 
''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] =", "widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox", "if len(pt): cb.setCurrentIndex(0) else: self.err = True elif v == \"int\" or v", "cb.setCurrentIndex(0) else: self.err = True elif v == \"int\" or v == \"float\":", "def __init__(self, paramTyp, param, projMan = None, parent = None): ''' Build the", "projMan: Project Project management class, used for access raw data. Attributes ---------- param:", "senderList: ''' super().__init__(parent) self.err = False self.param = param self.paramTyp = paramTyp self.projMan", "QCheckBox import numpy as np import pandas as pd class ParamWidget(QGridLayout): ''' Collecting", "the input widget with the value. **kargs: Arguments come with some special types", "ds = str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v", "== \"intl\" or v == \"floatl\": if len(val): if v == \"intl\" or", "self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter", "self.param[ind] = val elif typ == \"bool\": self.param[ind] = bool(val) elif \"combo\" in", "parameter changes due to importing or change of protocols, update display of parameters.", "ind = k, typ = v: self.updateParam(ind, typ, x, begin = False)) twoHB", "\"intl\" or v == \"floatl\" or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda", "elif v == \"int\" or v == \"float\": if v == \"int\" or", "or (1e-3 < min(map(abs, val)) and \\ max(map(abs, val)) < 1e3): ds =", "bool Whether there's an error in the parameters. senderList: ''' super().__init__(parent) self.err =", "data. Attributes ---------- param: dictionary Parameter set managed by this grid widget. 
err:", "v: self.updateParam(ind, typ, x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1)", "= self.param[k] if v == \"protocol\" and projMan != None: cb = QComboBox()", "\"intr\" or v == \"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x,", "Parameter set managed by this grid widget. err: bool Whether there's an error", "x, begin = True)) le1.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind,", "PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy", "list(map(int, val.split(','))) else: self.param[ind] = [] elif typ == \"floatl\": if len(val): self.param[ind]", "else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v == \"floatl\":", "self.senderList[i] if v == \"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3):", "self.projMan = projMan self.senderList = [] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k),", "(1e-3 < min(map(abs, val)) and \\ max(map(abs, val)) < 1e3): ds = \",", "boxes and labels to assign data. ''' def __init__(self, paramTyp, param, projMan =", "for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True else:", "in typ: self.param[ind] = val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err", "= '' le = self.senderList[i] le.setText(ds) elif v == \"bool\": cb = self.senderList[i]", "or (1e-3 < abs(val) and abs(val) < 1e3): ds = str(val) else: ds", "individual parameters in profile using values get from input widgets. Parameters ---------- ind:", "types of parameters. 
- begin: bool Whether it's the first one of the", "j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True elif v", "print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get", "get from input widgets. Parameters ---------- ind: string Key of the individual parameter", "Project Project management class, used for access raw data. Attributes ---------- param: dictionary", "and self.projMan != None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j", "1e3): ds = str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif", "le1 = self.senderList[i] if v == \"intr\" or (1e-3 < abs(val[0]) and abs(val[0])", "abs(val[0]) < 1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v", "if typ == \"int\": self.param[ind] = int(val) elif typ == \"float\": self.param[ind] =", "= int(val) else: self.param[ind][1] = int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0]", "== \"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols()", "**kargs): ''' Update individual parameters in profile using values get from input widgets.", "of parameters. - begin: bool Whether it's the first one of the two", "[] elif typ == \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind]", "QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np", "v == \"float\": if v == \"int\" or (1e-3 < abs(val) and abs(val)", "cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else:", "labels to assign data. 
''' def __init__(self, paramTyp, param, projMan = None, parent", "abs(val[1]) < 1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v", "parent = None): ''' Build the boxes. Parameters ---------- paramTyp: dictionary Defining types", "lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\": cb = QCheckBox()", "j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param", "== \"intr\" or v == \"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda", "== \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v:", "QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\": cb =", "k, typ = v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB =", "val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d in val]) else: ds =", "True)) le1.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x, begin", "self.updateParam(ind, typ, x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB,", "else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): ''' After", "elif v == \"int\" or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x,", "\"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) <", "cb.stateChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb,", "= v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in", "twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v", "of the input widget with the value. 
**kargs: Arguments come with some special", "le1.setText(ds) elif v == \"intl\" or v == \"floatl\": if len(val): if v", "if v == \"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear() pt", "= float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1]", "= True)) le1.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x,", "len(val): ds = \", \".join(val) else: ds = '' le = self.senderList[i] le.setText(ds)", "\", \".join(val) else: ds = '' le = self.senderList[i] le.setText(ds) elif v ==", "= projMan self.senderList = [] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i,", "cb.setCurrentIndex(0) else: self.err = True else: self.param = param for i, (k, v)", "le = self.senderList[i] le.setText(ds) elif v == \"intr\" or v == \"floatr\": le0,", "le0.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x, begin", "x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1)", "val = self.param[k] if v == \"protocol\" and projMan != None: cb =", "elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb", "v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\"", "parameters. Parameters ---------- param: dictionary, optional New parameters. Default is None, only tend", "typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\" or v ==", "ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get parameters managed in this", "\"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val)", "optional New parameters. Default is None, only tend to update protocols. ''' if", "set. param: dictionary The parameters in the set read from paramMan. 
projMan: Project", "**kargs: Arguments come with some special types of parameters. - begin: bool Whether", "input widget with the value. **kargs: Arguments come with some special types of", "\"floatl\" or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k,", "= [] elif typ == \"strl\": if len(val): self.param[ind] = [d.strip() for d", "v == \"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind =", "cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\")", "\"float\": if v == \"int\" or (1e-3 < abs(val) and abs(val) < 1e3):", "parameters in profile using values get from input widgets. Parameters ---------- ind: string", "class derived from a GridLayout with a bunch of widgets from PyQt5.QtWidgets import", "k, typ = v: self.updateParam(ind, typ, x, begin = False)) twoHB = QHBoxLayout()", "str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3 <", "Arguments come with some special types of parameters. - begin: bool Whether it's", "dictionary The parameters in the set read from paramMan. projMan: Project Project management", "= int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1]", "param for i, (k, v) in enumerate(self.paramTyp.items()): val = param[k] if v ==", "v == \"int\" or v == \"float\": if v == \"int\" or (1e-3", "Text out of the input widget with the value. **kargs: Arguments come with", "< abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1])", "typ == \"float\": self.param[ind] = float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0]", "to importing or change of protocols, update display of parameters. 
Parameters ---------- param:", "__init__(self, paramTyp, param, projMan = None, parent = None): ''' Build the boxes.", "if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif typ == \"intl\":", "\"protocol\" and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k,", "= str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v", "self.senderList.append(cb) elif v == \"int\" or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda", "twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v == \"intl\" or v", "in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan != None: cb = self.senderList[i]", "= [d.strip() for d in val.split(',')] else: self.param[ind] = [] elif typ ==", "paramMan. projMan: Project Project management class, used for access raw data. Attributes ----------", "projMan self.senderList = [] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0)", "typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif", "== \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif typ", "val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self):", "typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v ==", "else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v == \"intr\" or", "\"int\": self.param[ind] = int(val) elif typ == \"float\": self.param[ind] = float(val) elif typ", "(1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1]) else: ds =", "== \"floatr\": le0, le1 = self.senderList[i] if v == \"intr\" or (1e-3 <", "v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) 
self.senderList.append(cb) elif \"combo\" in v:", "x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb,", "= bool(val) elif \"combo\" in typ: self.param[ind] = val else: print(\"Unknown parameter type\")", "= self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown", "boxes. Parameters ---------- paramTyp: dictionary Defining types of parameters in the set. param:", "elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val)", "self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\" or v == \"floatr\": le0", "ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or", "data. ''' def __init__(self, paramTyp, param, projMan = None, parent = None): '''", "value. **kargs: Arguments come with some special types of parameters. - begin: bool", "\"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = [] elif typ", "le0, le1 = self.senderList[i] if v == \"intr\" or (1e-3 < abs(val[0]) and", "if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = [] elif typ ==", "string Text out of the input widget with the value. **kargs: Arguments come", "typ, x, begin = True)) le1.textEdited.connect(lambda x, ind = k, typ = v:", "all the input boxes and labels to assign data. ''' def __init__(self, paramTyp,", "in v: options = v.split(',')[1:] cb = QComboBox() for j in options: cb.addItem(j)", "input widgets. Parameters ---------- ind: string Key of the individual parameter to be", "an error in the parameters. 
senderList: ''' super().__init__(parent) self.err = False self.param =", "of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton,", "v == \"intr\" or v == \"floatr\": le0 = QLineEdit() le1 = QLineEdit()", "else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3 < abs(val[1])", "elif typ == \"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] =", "le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind,", "= v: self.updateParam(ind, typ, x, begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\"))", "x, ind = k, typ = v: self.updateParam(ind, typ, x, begin = False))", "= v.split(',')[1:] cb = QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind", "False self.param = param self.paramTyp = paramTyp self.projMan = projMan self.senderList = []", "ind = k, typ = v: \\ self.updateParam(ind, typ, x, begin = True))", "self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif v == \"intl\" or v == \"floatl\"", "\".join([\"{:.3e}\".format(d) for d in val]) else: ds = '' le = self.senderList[i] le.setText(ds)", "using values get from input widgets. Parameters ---------- ind: string Key of the", "= int(val) elif typ == \"float\": self.param[ind] = float(val) elif typ == \"intr\":", "1) self.senderList.append([le0, le1]) elif v == \"intl\" or v == \"floatl\" or v", "type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): ''' After parameter changes due", "param == None: for i, (k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\"", "---------- ind: string Key of the individual parameter to be set. 
typ: string", "self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val): ds = \", \".join(val) else:", "le.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) btn", "parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get parameters", "changes due to importing or change of protocols, update display of parameters. Parameters", "and abs(val) < 1e3): ds = str(val) else: ds = \"{:.3e}\".format(val) le =", "individual parameter to be set. typ: string Type of the individual parameter. val:", "QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np import pandas as pd", "Attributes ---------- param: dictionary Parameter set managed by this grid widget. err: bool", "x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le)", "QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np import pandas", "le.setText(ds) elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v:", "1) self.senderList.append(cb) elif v == \"int\" or v == \"float\": le = QLineEdit()", "\"floatl\": if len(val): if v == \"intl\" or (1e-3 < min(map(abs, val)) and", "''' Collecting all the input boxes and labels to assign data. ''' def", "float(val) elif typ == \"intl\": if len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind]", "ds = \", \".join([\"{:.3e}\".format(d) for d in val]) else: ds = '' le", "< 1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v ==", "the boxes. 
Parameters ---------- paramTyp: dictionary Defining types of parameters in the set.", "if len(val): if v == \"intl\" or (1e-3 < min(map(abs, val)) and \\", "== None: for i, (k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and", "\"strl\": if len(val): self.param[ind] = [d.strip() for d in val.split(',')] else: self.param[ind] =", "1) self.senderList.append(le) elif v == \"intr\" or v == \"floatr\": le0 = QLineEdit()", "== \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ = v:", "elif \"combo\" in v: options = v.split(',')[1:] cb = QComboBox() for j in", "self.param[ind][1] = float(val) elif typ == \"intl\": if len(val): self.param[ind] = list(map(int, val.split(',')))", "le = self.senderList[i] le.setText(ds) elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif", "= self.senderList[i] le.setText(ds) elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\"", "self.senderList.append([le0, le1]) elif v == \"intl\" or v == \"floatl\" or v ==", "print(v, val) self.update() def updateParam(self, ind, typ, val, **kargs): ''' Update individual parameters", "int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] =", "False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val) elif typ == \"float\":", "Defining types of parameters in the set. param: dictionary The parameters in the", "dictionary, optional New parameters. Default is None, only tend to update protocols. '''", "Key of the individual parameter to be set. 
typ: string Type of the", "elif typ == \"bool\": self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind] =", "self.param[ind][1] = int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else:", "typ == \"int\": self.param[ind] = int(val) elif typ == \"float\": self.param[ind] = float(val)", "self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = [] elif typ == \"strl\": if", "val elif typ == \"bool\": self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind]", "typ, val, **kargs): ''' Update individual parameters in profile using values get from", "elif v == \"strl\": if len(val): ds = \", \".join(val) else: ds =", "le.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i,", "le.setText(ds) elif v == \"intr\" or v == \"floatr\": le0, le1 = self.senderList[i]", "= True def getParam(self): ''' Get parameters managed in this widget. ''' if", "str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v ==", "typ == \"protocol\": self.param[ind] = val elif typ == \"bool\": self.param[ind] = bool(val)", "k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v", "[d.strip() for d in val.split(',')] else: self.param[ind] = [] elif typ == \"protocol\":", "v == \"int\" or (1e-3 < abs(val) and abs(val) < 1e3): ds =", "self.senderList[i] le.setText(ds) elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in", "Parameters ---------- paramTyp: dictionary Defining types of parameters in the set. param: dictionary", "= \", \".join([\"{:.3e}\".format(d) for d in val]) else: ds = '' le =", "i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param) def updateDisp(self, param =", "importing or change of protocols, update display of parameters. 
Parameters ---------- param: dictionary,", "def updateParam(self, ind, typ, val, **kargs): ''' Update individual parameters in profile using", "le.setText(ds) elif v == \"strl\": if len(val): ds = \", \".join(val) else: ds", "le0.setText(ds) if v == \"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3):", "self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif typ == \"floatr\": if kargs[\"begin\"]:", "cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x))", "widget. err: bool Whether there's an error in the parameters. senderList: ''' super().__init__(parent)", "== \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif typ", "typ == \"bool\": self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind] = val", "managed by this grid widget. err: bool Whether there's an error in the", "= QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ,", "display of parameters. Parameters ---------- param: dictionary, optional New parameters. 
Default is None,", "ind = k, typ = v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\")", "pandas as pd class ParamWidget(QGridLayout): ''' Collecting all the input boxes and labels", "for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ = v:", "len(val): self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = [] elif typ == \"floatl\":", "\\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in v: options", "[] elif typ == \"strl\": if len(val): self.param[ind] = [d.strip() for d in", "import numpy as np import pandas as pd class ParamWidget(QGridLayout): ''' Collecting all", "elif typ == \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] =", "btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif", "the input boxes and labels to assign data. ''' def __init__(self, paramTyp, param,", "if param == None: for i, (k, v) in enumerate(self.paramTyp.items()): if v ==", "Whether there's an error in the parameters. 
senderList: ''' super().__init__(parent) self.err = False", "i, (k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan != None:", "le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ,", "(1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0]) else: ds =", "ind = k, typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i,", "val.split(','))) else: self.param[ind] = [] elif typ == \"strl\": if len(val): self.param[ind] =", "\\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB,", "options = v.split(',')[1:] cb = QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x,", "as np import pandas as pd class ParamWidget(QGridLayout): ''' Collecting all the input", "x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i,", "== \"protocol\" and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind =", "param: dictionary The parameters in the set read from paramMan. projMan: Project Project", "def getParam(self): ''' Get parameters managed in this widget. ''' if not self.err:", "< 1e3): ds = \", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for", "set managed by this grid widget. 
err: bool Whether there's an error in", "= float(val) else: self.param[ind][1] = float(val) elif typ == \"intl\": if len(val): self.param[ind]", "d in val]) else: ds = '' le = self.senderList[i] le.setText(ds) elif v", "max(map(abs, val)) < 1e3): ds = \", \".join(map(str, val)) else: ds = \",", "= \", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d in val])", "\"int\" or v == \"float\": if v == \"int\" or (1e-3 < abs(val)", "begin = False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0,", "update display of parameters. Parameters ---------- param: dictionary, optional New parameters. Default is", "= val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def", "else: ds = '' le = self.senderList[i] le.setText(ds) elif v == \"strl\": if", "to be set. typ: string Type of the individual parameter. val: string Text", "self.param[ind] = [d.strip() for d in val.split(',')] else: self.param[ind] = [] elif typ", "class, used for access raw data. Attributes ---------- param: dictionary Parameter set managed", "''' super().__init__(parent) self.err = False self.param = param self.paramTyp = paramTyp self.projMan =", "if v == \"protocol\" and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x,", "= k, typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1)", "typ == \"strl\": if len(val): self.param[ind] = [d.strip() for d in val.split(',')] else:", "a GridLayout with a bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit,", "\\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x, ind = k, typ", "update protocols. 
''' if param == None: for i, (k, v) in enumerate(self.paramTyp.items()):", "else: self.err = True else: self.param = param for i, (k, v) in", "else: self.param[ind] = [] elif typ == \"strl\": if len(val): self.param[ind] = [d.strip()", "k, typ = v: \\ self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x,", "elif v == \"intl\" or v == \"floatl\" or v == \"strl\": le", "= param[k] if v == \"protocol\" and self.projMan != None: cb = self.senderList[i]", "param: dictionary, optional New parameters. Default is None, only tend to update protocols.", "if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif typ == \"floatr\":", "(k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if v ==", "\"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ = v: \\", "pd class ParamWidget(QGridLayout): ''' Collecting all the input boxes and labels to assign", "self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\" or v", "ds = \", \".join(val) else: ds = '' le = self.senderList[i] le.setText(ds) elif", "for i, (k, v) in enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\"", "elif v == \"intr\" or v == \"floatr\": le0, le1 = self.senderList[i] if", "self.update() def updateParam(self, ind, typ, val, **kargs): ''' Update individual parameters in profile", "two value range parameters. 
''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ ==", "param[k] if v == \"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear()", "\"bool\": self.param[ind] = bool(val) elif \"combo\" in typ: self.param[ind] = val else: print(\"Unknown", "v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter", "\"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif typ ==", "self.err = True def getParam(self): ''' Get parameters managed in this widget. '''", "= QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ,", "= param for i, (k, v) in enumerate(self.paramTyp.items()): val = param[k] if v", "and \\ max(map(abs, val)) < 1e3): ds = \", \".join(map(str, val)) else: ds", "ds = \", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d in", "v) in enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\" and self.projMan !=", "and abs(val[0]) < 1e3): ds = str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if", "ds = '' le = self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val):", "self.param[ind] = int(val) elif typ == \"float\": self.param[ind] = float(val) elif typ ==", "of parameters. Parameters ---------- param: dictionary, optional New parameters. 
Default is None, only", "and abs(val[1]) < 1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif", "v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn)", "float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] =", "x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:] cb", "typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1)", "= self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self, ind,", "le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ =", "v: cb = self.senderList[i] cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update() def", "cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param = param for", "self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True", "raw data. Attributes ---------- param: dictionary Parameter set managed by this grid widget.", "\"intl\" or (1e-3 < min(map(abs, val)) and \\ max(map(abs, val)) < 1e3): ds", "self.param[ind] = [] elif typ == \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(',')))", "parameters. - begin: bool Whether it's the first one of the two value", "cb = QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k,", "there's an error in the parameters. senderList: ''' super().__init__(parent) self.err = False self.param", "i, 0) val = self.param[k] if v == \"protocol\" and projMan != None:", "Parameters ---------- ind: string Key of the individual parameter to be set. 
typ:", "\"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind,", "== \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = [] elif", "''' Build the boxes. Parameters ---------- paramTyp: dictionary Defining types of parameters in", "cb.setCurrentText(val) else: print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self, ind, typ, val,", "v == \"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds =", "True def getParam(self): ''' Get parameters managed in this widget. ''' if not", "the individual parameter to be set. typ: string Type of the individual parameter.", "None): ''' Build the boxes. Parameters ---------- paramTyp: dictionary Defining types of parameters", "cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind,", "typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else:", "= QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: self.updateParam(ind, typ, x))", "in profile using values get from input widgets. Parameters ---------- ind: string Key", "QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np import", "\\ QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np import pandas as", "self.paramTyp = paramTyp self.projMan = projMan self.senderList = [] for i, (k, v)", "\"int\" or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k,", "for i, (k, v) in enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan !=", "== \"intl\" or v == \"floatl\" or v == \"strl\": le = QLineEdit()", "with the value. **kargs: Arguments come with some special types of parameters. 
-", "v == \"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds =", "else: self.param[ind][1] = int(val) elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val)", "= QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\": cb", "= self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt):", "k, typ = v: \\ self.updateParam(ind, typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb)", "= str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v ==", "= str(val[0]) else: ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3", "\"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or v == \"floatl\": if len(val): if", "k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif", "else: self.param = param for i, (k, v) in enumerate(self.paramTyp.items()): val = param[k]", "self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x,", "self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\" or v == \"float\": le", "v == \"intr\" or v == \"floatr\": le0, le1 = self.senderList[i] if v", "- begin: bool Whether it's the first one of the two value range", "typ = v: \\ self.updateParam(ind, typ, x)) btn = QPushButton(\"...\") lstHB = QHBoxLayout()", "for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True elif", "x)) self.addWidget(le, i, 1) self.senderList.append(le) elif v == \"intr\" or v == \"floatr\":", "due to importing or change of protocols, update display of parameters. Parameters ----------", "New parameters. Default is None, only tend to update protocols. 
''' if param", "super().__init__(parent) self.err = False self.param = param self.paramTyp = paramTyp self.projMan = projMan", "parameter to be set. typ: string Type of the individual parameter. val: string", "\\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\" or", "v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k] if v == \"protocol\"", "= None): ''' Build the boxes. Parameters ---------- paramTyp: dictionary Defining types of", "== \"int\" or v == \"float\": if v == \"int\" or (1e-3 <", "QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x))", "\", \".join(map(str, val)) else: ds = \", \".join([\"{:.3e}\".format(d) for d in val]) else:", "< abs(val) and abs(val) < 1e3): ds = str(val) else: ds = \"{:.3e}\".format(val)", "= [] elif typ == \"floatl\": if len(val): self.param[ind] = list(map(float, val.split(','))) else:", "self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif typ == \"intl\": if len(val):", "typ, x)) cb.setCurrentIndex(0) self.addWidget(cb, i, 1) self.senderList.append(cb) else: print(\"Unknown parameter type.\") self.updateDisp() self.updateDisp(param)", "updateDisp(self, param = None): ''' After parameter changes due to importing or change", "Get parameters managed in this widget. 
''' if not self.err: return self.param else:", "typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif \"combo\"", "le1]) elif v == \"intl\" or v == \"floatl\" or v == \"strl\":", "pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param = param", "typ: self.param[ind] = val else: print(\"Unknown parameter type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err =", "and projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ", "== \"float\": if v == \"int\" or (1e-3 < abs(val) and abs(val) <", "or v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ", "== \"bool\": cb = self.senderList[i] cb.setChecked(val) elif \"combo\" in v: cb = self.senderList[i]", "self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:] cb = QComboBox() for j", "---------- param: dictionary Parameter set managed by this grid widget. err: bool Whether", "protocols. ''' if param == None: for i, (k, v) in enumerate(self.paramTyp.items()): if", "== \"strl\": if len(val): self.param[ind] = [d.strip() for d in val.split(',')] else: self.param[ind]", "to assign data. ''' def __init__(self, paramTyp, param, projMan = None, parent =", "1) self.senderList.append(cb) elif \"combo\" in v: options = v.split(',')[1:] cb = QComboBox() for", "protocols, update display of parameters. Parameters ---------- param: dictionary, optional New parameters. Default", "updateParam(self, ind, typ, val, **kargs): ''' Update individual parameters in profile using values", "self.updateDisp() self.updateDisp(param) def updateDisp(self, param = None): ''' After parameter changes due to", "''' if param == None: for i, (k, v) in enumerate(self.paramTyp.items()): if v", "== \"floatr\": le0 = QLineEdit() le1 = QLineEdit() le0.textEdited.connect(lambda x, ind = k,", "managed in this widget. 
''' if not self.err: return self.param else: return None", "v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ =", "v == \"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear() pt =", "grid widget. err: bool Whether there's an error in the parameters. senderList: '''", "param: dictionary Parameter set managed by this grid widget. err: bool Whether there's", "self.param[ind] = list(map(int, val.split(','))) else: self.param[ind] = [] elif typ == \"floatl\": if", "i, (k, v) in enumerate(self.paramTyp.items()): val = param[k] if v == \"protocol\" and", "\"intr\" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1]) else:", "change of protocols, update display of parameters. Parameters ---------- param: dictionary, optional New", "v == \"floatl\": if len(val): if v == \"intl\" or (1e-3 < min(map(abs,", "in val]) else: ds = '' le = self.senderList[i] le.setText(ds) elif v ==", "the value. **kargs: Arguments come with some special types of parameters. - begin:", "str(val) else: ds = \"{:.3e}\".format(val) le = self.senderList[i] le.setText(ds) elif v == \"intr\"", "< min(map(abs, val)) and \\ max(map(abs, val)) < 1e3): ds = \", \".join(map(str,", "QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v ==", "ParamWidget(QGridLayout): ''' Collecting all the input boxes and labels to assign data. 
'''", "elif \"combo\" in typ: self.param[ind] = val else: print(\"Unknown parameter type\") except ValueError:", "self.senderList.append(le) elif v == \"intr\" or v == \"floatr\": le0 = QLineEdit() le1", "\"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for", "elif typ == \"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val)", "type\") except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get parameters managed", "string Type of the individual parameter. val: string Text out of the input", "of protocols, update display of parameters. Parameters ---------- param: dictionary, optional New parameters.", "self.param[ind] = [] elif typ == \"protocol\": self.param[ind] = val elif typ ==", "first one of the two value range parameters. ''' try: self.err = False", "< 1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v ==", "v == \"intl\" or v == \"floatl\": if len(val): if v == \"intl\"", "self.projMan != None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in", "except ValueError: self.sender().setStyleSheet(\"background:#FF0000;\") self.err = True def getParam(self): ''' Get parameters managed in", "x, ind = k, typ = v: \\ self.updateParam(ind, typ, x)) btn =", "getParam(self): ''' Get parameters managed in this widget. ''' if not self.err: return", "or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1]) else: ds", "le = self.senderList[i] le.setText(ds) elif v == \"strl\": if len(val): ds = \",", "elif v == \"intr\" or v == \"floatr\": le0 = QLineEdit() le1 =", "dictionary Parameter set managed by this grid widget. 
err: bool Whether there's an", "''' def __init__(self, paramTyp, param, projMan = None, parent = None): ''' Build", "list(map(float, val.split(','))) else: self.param[ind] = [] elif typ == \"strl\": if len(val): self.param[ind]", "parameter. val: string Text out of the input widget with the value. **kargs:", "or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ", "== \"protocol\": self.param[ind] = val elif typ == \"bool\": self.param[ind] = bool(val) elif", "ds = \"{:.3e}\".format(val[0]) le0.setText(ds) if v == \"intr\" or (1e-3 < abs(val[1]) and", "len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param = param for i, (k,", "widget with the value. **kargs: Arguments come with some special types of parameters.", "or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0]) else: ds", "True elif v == \"int\" or v == \"float\": if v == \"int\"", "val) self.update() def updateParam(self, ind, typ, val, **kargs): ''' Update individual parameters in", "kargs[\"begin\"]: self.param[ind][0] = int(val) else: self.param[ind][1] = int(val) elif typ == \"floatr\": if", "len(val): self.param[ind] = list(map(float, val.split(','))) else: self.param[ind] = [] elif typ == \"strl\":", "management class, used for access raw data. Attributes ---------- param: dictionary Parameter set", "GridLayout with a bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\", "ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds) elif v == \"intl\" or", "else: self.param[ind][1] = float(val) elif typ == \"intl\": if len(val): self.param[ind] = list(map(int,", "types of parameters in the set. 
param: dictionary The parameters in the set", "== \"intr\" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3): ds = str(val[0])", "\"int\" or (1e-3 < abs(val) and abs(val) < 1e3): ds = str(val) else:", "# class derived from a GridLayout with a bunch of widgets from PyQt5.QtWidgets", "len(val): if v == \"intl\" or (1e-3 < min(map(abs, val)) and \\ max(map(abs,", "0) val = self.param[k] if v == \"protocol\" and projMan != None: cb", "from input widgets. Parameters ---------- ind: string Key of the individual parameter to", "= True elif v == \"int\" or v == \"float\": if v ==", "False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1]) elif", "= list(map(float, val.split(','))) else: self.param[ind] = [] elif typ == \"strl\": if len(val):", "x)) self.addWidget(cb, i, 1) self.senderList.append(cb) elif v == \"int\" or v == \"float\":", "v: options = v.split(',')[1:] cb = QComboBox() for j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda", "if v == \"intl\" or (1e-3 < min(map(abs, val)) and \\ max(map(abs, val))", "some special types of parameters. - begin: bool Whether it's the first one", "Type of the individual parameter. val: string Text out of the input widget", "param, projMan = None, parent = None): ''' Build the boxes. Parameters ----------", "\"strl\": if len(val): ds = \", \".join(val) else: ds = '' le =", "val: string Text out of the input widget with the value. **kargs: Arguments", "special types of parameters. - begin: bool Whether it's the first one of", "''' try: self.err = False self.sender().setStyleSheet(\"background:#FFFFFF;\") if typ == \"int\": self.param[ind] = int(val)", "Parameters ---------- param: dictionary, optional New parameters. 
Default is None, only tend to", "v == \"floatl\" or v == \"strl\": le = QLineEdit() le.textEdited.connect(lambda x, ind", "QPushButton, QCheckBox import numpy as np import pandas as pd class ParamWidget(QGridLayout): '''", "= False)) twoHB = QHBoxLayout() twoHB.addWidget(le0) twoHB.addWidget(QLabel(\"to\")) twoHB.addWidget(le1) self.addLayout(twoHB, i, 1) self.senderList.append([le0, le1])", "= [] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val =", "= QPushButton(\"...\") lstHB = QHBoxLayout() lstHB.addWidget(le) lstHB.addWidget(btn) self.addLayout(lstHB, i, 1) self.senderList.append(le) elif v", "in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else: self.err = True else: self.param =", "d in val.split(',')] else: self.param[ind] = [] elif typ == \"protocol\": self.param[ind] =", "abs(val[1]) and abs(val[1]) < 1e3): ds = str(val[1]) else: ds = \"{:.3e}\".format(val[1]) le1.setText(ds)", "le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind,", "enumerate(self.paramTyp.items()): if v == \"protocol\" and self.projMan != None: cb = self.senderList[i] cb.clear()", "QHBoxLayout, QComboBox, QPushButton, QCheckBox import numpy as np import pandas as pd class", "else: print(\"Unknown parameter type\") print(v, val) self.update() def updateParam(self, ind, typ, val, **kargs):", "projMan != None: cb = QComboBox() cb.currentTextChanged.connect(lambda x, ind = k, typ =", "come with some special types of parameters. - begin: bool Whether it's the", "Default is None, only tend to update protocols. 
''' if param == None:", "j in options: cb.addItem(j) cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \\", "cb.clear() pt = self.projMan.getProtocols() for j in pt: cb.addItem(j) if len(pt): cb.setCurrentIndex(0) else:", "self.param[ind] = float(val) elif typ == \"intr\": if kargs[\"begin\"]: self.param[ind][0] = int(val) else:", "After parameter changes due to importing or change of protocols, update display of", "self.updateParam(ind, typ, x, begin = True)) le1.textEdited.connect(lambda x, ind = k, typ =", "= k, typ = v: \\ self.updateParam(ind, typ, x)) self.addWidget(cb, i, 1) self.senderList.append(cb)", "= k, typ = v: self.updateParam(ind, typ, x)) self.addWidget(le, i, 1) self.senderList.append(le) elif", "elif v == \"bool\": cb = QCheckBox() cb.stateChanged.connect(lambda x, ind = k, typ", "bunch of widgets from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \\ QVBoxLayout, QHBoxLayout, QComboBox,", "[] for i, (k, v) in enumerate(paramTyp.items()): self.addWidget(QLabel(k), i, 0) val = self.param[k]", "None): ''' After parameter changes due to importing or change of protocols, update", "cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in pt: cb.addItem(j) if", "ds = '' le = self.senderList[i] le.setText(ds) elif v == \"bool\": cb =", "QLineEdit() le0.textEdited.connect(lambda x, ind = k, typ = v: \\ self.updateParam(ind, typ, x,", "None, only tend to update protocols. ''' if param == None: for i,", "!= None: cb = self.senderList[i] cb.clear() pt = self.projMan.getProtocols() for j in pt:", "'' le = self.senderList[i] le.setText(ds) elif v == \"bool\": cb = self.senderList[i] cb.setChecked(val)", "= self.senderList[i] le.setText(ds) elif v == \"intr\" or v == \"floatr\": le0, le1", "elif typ == \"strl\": if len(val): self.param[ind] = [d.strip() for d in val.split(',')]", "the set read from paramMan. 
projMan: Project Project management class, used for access", "v == \"float\": le = QLineEdit() le.textEdited.connect(lambda x, ind = k, typ =", "\"floatr\": if kargs[\"begin\"]: self.param[ind][0] = float(val) else: self.param[ind][1] = float(val) elif typ ==" ]
[ "csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus = '<pos>' r.length = '-'", "try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs =", "| quote}}) topiary = {{args.topiary | quote}} netmhc = {{args.netmhc | quote}} netmhcpan", "| quote}} params = {{args.params | repr}} refall = {{args.refall | quote}} tmpdir", "TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for", "'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature not in features: continue allpos[feature]", "inptype = 1, f = wildfile, _prefix = '-', xls = True, xlsfile", "csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code =", "['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall =", "# read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k", "= gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w')", "reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {} for r in reader:", "import re from os import environ from pathlib import Path from cyvcf2 import", "'.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}])", "+ hla] = float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames +", "# ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0", "# genome = gmaps[genome] # extract expression from VCF file vcf = VCF(infile)", "afile = {{i.afile | ?path.isfile | =readlines | !alwaysList | repr}} outfile =", "out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide =", "pass def run_smm(): pass def run_smm_pmbec(): pass runner = { 'netmhc' : 
run_netmhc,", "in params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile", "continue if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile:", "0 0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - -", "allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile", "| quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents =", "nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361", "47.6206 \"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for", "| quote}}) outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}} netmhc", "index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep = wildpep[:index] + m.group(1)", "output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k nM Rank core", "writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out = TsvRecord() out.HLA_allele = r.allele", "import VCF from gff import Gff from diot import Diot from cmdy import", "as fin, open(txfile2, 'w') as fout: for line in fin: if '<pos>' not", "0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id',", "out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 =", "RuntimeError(\"Failed to run netmhcpan, output file not generated.\") # read the output \"\"\"", "| quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons", "find position information for: 
%s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2", "= Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't", "500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan", "gx.split(',') for gx in gxs: gene, expr = gx.split('|', 1) csq = [csq", "wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] =", "False wildbindings = {allele: {} for allele in mhcallele2.split(',')} for line in res:", "continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele]", "a = mhcallele2, v = True, inptype = 1, f = wildfile, _prefix", "sure those mhc-predictors are in PATH PATHs = set() for mhcpred in (netmhc,", "tx.split(',') for tx in txs: transcript, expr = tx.split('|', 1) csq = [csq", "'-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id = '-'", "= genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors", "--rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv", "to annotate the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache',", "OR4G11P - chr1:62947-63887 - - 0 0 0 OK # ENSG00000268020 - -", "0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0", "datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You", "shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', 
'').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt'", "= writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out = TsvRecord() out.HLA_allele =", "{{args.topiary | quote}} netmhc = {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}}", "0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code',", "alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader", "PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW", "568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308", "run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data = True'. \" \"If you", "0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id',", "+ '\\t' + r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep,", "if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code = '-'", "mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams =", "g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set()", "ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK", "not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with by VEP') #", "VCF(infile) gxfile = txfile = False features = set() if vcf.contains('GX'): if not", "{allele: {} for allele in 
mhcallele2.split(',')} for line in res: if 'PEPLIST' not", "else: continue if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if", "= r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG =", "'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm, 'smm-pmbec'", "--rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re from os import environ from", "file has to be annotated with by VEP') # tracking_id class_code nearest_ref_id gene_id", "variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs: transcript, expr = tx.split('|', 1)", "p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep = r.peptide index", "nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead()", "gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature not", "fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary =", "True, parents = True) # check if we have downloaded annotation data for", "= r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST =", "gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 -", "import shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from", "mhc-predictors are in PATH PATHs = set() for mhcpred in (netmhc, netmhcpan, netmhciipan,", "\"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele", "\"specify 
'params.download_reference_genome_data = True'. \" \"If you have it installed somewhere else, make", "if we have downloaded annotation data for the genome # topiary will use", ": run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' :", "for r in reader: peptide = r[1] for i, hla in enumerate(alleles): wildbindings[peptide", "netmhccons = {{args.netmhccons | quote}} smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec", "writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated", "the genome # topiary will use it to annotate the data gmaps =", "gx.split('|', 1) csq = [csq for csq in csqs if f'|{gene}|' in csq][0].split('|')", "datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for genome", "the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k nM Rank", "'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind()", "'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' +", "= {{args.params | repr}} refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir |", "\\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\", "= r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t'", "Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature", "genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for genome {}{} installed. 
\" \"Either", "locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P", "'')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500", "r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >=", "--mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression", "'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf:", "in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with", "/ 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v = True, inptype", "wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v = True, inptype = 1,", "= ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make", "peptides if it is not a substitution continue # parse effect: p.N84S m", "if feature_id not in allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id)", "csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id = '-'", "run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join(", "= '-' r.locus = '<pos>' r.length = '-' r.coverage = '-' r.FPKM =", "{{args.mhc_predictor | quote}} genome = {{args.genome | quote}} params = {{args.params | repr}}", "variant in vcf: # try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue", "topiary = {{args.topiary | quote}} netmhc = 
{{args.netmhc | quote}} netmhcpan = {{args.netmhcpan", "logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile =", "\"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k nM Rank core icore", "installed. \" \"Either you run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data =", "quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) # check if", "def run_smm_pmbec(): pass runner = { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan'", "Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087", "'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm'", "< 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan():", "r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus", "params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor #", "{{i.infile | quote}} afile = {{i.afile | ?path.isfile | =readlines | !alwaysList |", "for csq in csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id =", "gmaps[genome]) if genome in gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir() and", "r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*',", "+ r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation", "will use it to annotate the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'}", "= True) res = shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {} for", "- - 
ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK gxfile", "quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index", "genome # topiary will use it to annotate the data gmaps = {'hg19':", "{{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents", "r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons():", "datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract expression from VCF file vcf", "1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH", "1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps =", "csq = [csq for csq in csqs if f'|{transcript}|' in csq][0].split('|') r =", "r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity =", "'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: tx = variant.format('TX')[0]", "quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True,", "True, _debug = True) res = shell.netmhc(**nparams) pos_hit = False wildbindings = {allele:", "out = TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene", "xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file not", "have it installed somewhere else, make a symbolic link to {}\".format(genome, ('/' +", "expr = tx.split('|', 1) csq = [csq for csq in csqs if f'|{transcript}|'", "0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = 
['tracking_id', 'class_code',", "if genome in gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome,", "= True, inptype = 1, f = wildfile, _prefix = '-', xls =", "to get the wildtype peptides if it is not a substitution continue #", "'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: tx = variant.format('TX')[0] except", "in Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript':", "= Diot( a = mhcallele2, v = True, inptype = 1, f =", "- 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames =", "= gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf", "allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {} for r in", "data for the genome # topiary will use it to annotate the data", "= xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file", "1, f = wildfile, _prefix = '-', _iter = True, _debug = True)", "r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def", "nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r", "symbolic link to {}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps else '',", "txs: transcript, expr = tx.split('|', 1) csq = [csq for csq in csqs", "2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in", "+ ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # 
#,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6", "'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue", "if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as fout:", "r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG = r.gene_id", "netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11", "<reponame>pwwang/biopipen<gh_stars>1-10 \"\"\" ./topiary \\ --vcf somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\", "'-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000", "= {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}} smm = {{args.smm |", "quote}} netmhc = {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan =", "'-', _iter = True, _debug = True) res = shell.netmhc(**nparams) pos_hit = False", "'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v = True, BA = True,", "= [csq for csq in csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord()", "='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2,", "'PEPLIST' not in line or line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12]", "in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2", "gxfile = txfile = False features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'):", "quote}} afile = {{i.afile | ?path.isfile | =readlines | 
!alwaysList | repr}} outfile", "if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-'", "if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with by VEP')", "tmpdir.mkdir(exist_ok = True, parents = True) # check if we have downloaded annotation", "r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm():", "= txfile = False features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise", "= float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide,", "writer.writeHead() for variant in vcf: # try..except try: gx = variant.format('GX')[0] except (KeyError,", "txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as fout: for", "I don't know how to get the wildtype peptides if it is not", "from os import environ from pathlib import Path from cyvcf2 import VCF from", "vcf: # try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs =", "refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id", "- r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity,", "cyvcf2 import VCF from gff import Gff from diot import Diot from cmdy", "quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome | quote}} params =", "else: params.maf = infile alleles = [allele.replace('*', '') for allele in afile] params['mhc-alleles']", "import Gff from diot import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import", "tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status", "!alwaysList | repr}} outfile = Path({{o.outfile | quote}}) outdir = 
Path({{o.outdir | quote}})", "it is not a substitution continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$',", "= TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {} for r in reader: peptide", "for tx in txs: transcript, expr = tx.split('|', 1) csq = [csq for", "nM Rank core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353", "TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4]", "for r in tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide", "| quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}} smm", "vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with by", "CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) #", "else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary", "shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0", "variant in vcf: # try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue", "feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile 
params['rna-transcript-fpkm-tracking-file'] = txfile", "topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile alleles", "'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'),", "= {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan |", "add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor", "smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor", "= set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit =", "find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2", "= False features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file", "+ gmaps[genome]) if genome in gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir()", "vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with by VEP') # tracking_id", "# add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 
g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if", "/ '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index |", "\"If you have it installed somewhere else, make a symbolic link to {}\".format(genome,", "shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {} for allele in mhcallele2.split(',')} for", "gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome", "and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for genome {}{}", "to {}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps else '', datadir)) #", "try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs", "= {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor |", "4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re", "21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles", "True, inptype = 1, f = wildfile, _prefix = '-', _iter = True,", "in gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): #", "quote}}) outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}} netmhc =", "line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot find position information for: %s,", "{} for allele in mhcallele2.split(',')} for line in res: if 'PEPLIST' not in", "CmdyReturnCodeException from bioprocs.utils 
import shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter,", "allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin,", "# check if we have downloaded annotation data for the genome # topiary", "Pos Peptide ID core icore 1-log50k nM Rank core icore 1-log50k nM Rank", "except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params)", "RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r')", "out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>'", "wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v", "'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: gx", "params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile =", "f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele]", "out.Affinity = r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep", "tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide',", "r.gene_short_name = csq[3] r.tss_id = '-' r.locus = '<pos>' r.length = '-' r.coverage", "_iter = True, _debug = True) res = shell.netmhc(**nparams) pos_hit = False wildbindings", "nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def", "gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not 
datadir.joinpath(genome).is_dir()", "('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader", "r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name", "it to annotate the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir =", "params = {{args.params | repr}} refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir", "+ ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 #", "in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile =", "0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW", "'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: tx", "'<pos>' not in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id not", "if wildpep[index] != m.group(2): continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide", "shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os import path%}", "'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###',", "annotate the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl')", "0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 
35829.9805 47.6206", "f = wildfile, _prefix = '-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams)", "= outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as fout: for line in", "'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames", "gxs: gene, expr = gx.split('|', 1) csq = [csq for csq in csqs", "- - 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames", "{{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok", "line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames =", "writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames =", "wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity)", "for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt')", "csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code =", "'\\t' + hla] = float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames", "TsvRecord {% from os import path%} infile = {{i.infile | quote}} afile =", "BA = True, inptype = 1, f = wildfile, _prefix = '-', xls", "Diot( a = mhcallele2, v = True, inptype = 1, f = wildfile,", "data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not", "= gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature", "TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] 
writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames", "= wildfile, _prefix = '-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if", "OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0", "mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc)", "- chr1:62947-63887 - - 0 0 0 OK # ENSG00000268020 - - ENSG00000268020", "RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A',", "continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs: gene, expr", "writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter =", "if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile alleles =", "Rank core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488", "FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0", "- - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK txfile", "wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep =", "in allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id]))", "not a substitution continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if", "= r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep =", "output file not generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide", "tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for r in tpreader:", "= infile alleles = [allele.replace('*', '') for allele in afile] params['mhc-alleles'] = ','.join(alleles)", "else ','.join( allele for allele in 
Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent", "reader: peptide = r[1] for i, hla in enumerate(alleles): wildbindings[peptide + '\\t' +", "+ '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-')", "tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide +", "'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r", "+ '\\t' + r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) -", "{{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) #", "= params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines()", "= re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide)", "or line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames", "for variant in vcf: # try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError):", "parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide',", "r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile:", "params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile", "res: if 'PEPLIST' not in line or line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]]", "11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788", 
"'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment =", "+ r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if r.wildaffinity", "'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide =", "set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',')", "'\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 =", "PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with", "csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id = '-'", "+ '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2", "= mhc_predictor # make sure those mhc-predictors are in PATH PATHs = set()", "= gmaps[genome] # extract expression from VCF file vcf = VCF(infile) gxfile =", "= variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs: transcript, expr = tx.split('|',", "{{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}}", "--output-csv epitopes.csv \"\"\" import re from os import environ from pathlib import Path", "FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 -", "not generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core", "LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW", "Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding #", "TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for r in tpreader: if r.effect_type", "vcf = VCF(infile) gxfile = txfile = False features = set() if 
vcf.contains('GX'):", "r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name", "wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams =", "to run netmhcpan, output file not generated.\") # read the output \"\"\" HLA-A24:02", "LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW", "= TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id =", "r[1] for i, hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla] = float(r[7", "TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs: transcript,", "ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK", "# parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep", "= True, parents = True) # check if we have downloaded annotation data", "[allele.replace('*', '') for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv']", "Peptide ID core icore 1-log50k nM Rank core icore 1-log50k nM Rank 0", "TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs: gene,", "f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id", "'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t')", "fin, open(txfile2, 'w') as fout: for line in fin: if '<pos>' not in", "gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as fout: for", "VCF from gff import Gff from diot import Diot from cmdy import CmdyReturnCodeException", "1-log50k nM Rank core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH", "enumerate(alleles): wildbindings[peptide + '\\t' + hla] = 
float(r[7 + i*5]) writer = TsvWriter(outfile)", "for line in fin: if '<pos>' not in line: fout.write(line) else: feature_id =", "txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or", "make sure those mhc-predictors are in PATH PATHs = set() for mhcpred in", "outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as fout: for line in fin:", "TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene = r.gene", "not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx')", "./topiary \\ --vcf somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500", "r = TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id", "netmhcpan, output file not generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos", "= outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id',", "those mhc-predictors are in PATH PATHs = set() for mhcpred in (netmhc, netmhcpan,", "except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in", "('/' + gmaps[genome]) if genome in gmaps else '', datadir)) # if not", "= '-', _iter = True, _debug = True) res = shell.netmhc(**nparams) pos_hit =", "= csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name =", "= txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf')", "in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id =", "features.add(r.tracking_id) writer.close() if 
vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be", "'###', delimit = ',') for r in tpreader: if r.effect_type != 'Substitution': #", "r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity", "| quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag", "{{args.params | repr}} refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}})", "\\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\", "= set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent))", "isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re from os import", "HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking", "mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps =", "if it is not a substitution continue # parse effect: p.N84S m =", "run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm,", "'').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a =", "csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3]", "'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try:", "wildbindings[peptide + '\\t' + hla] = float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames", "['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() 
tpreader.rewind() for r in tpreader:", "feature = gff['attributes']['transcript_id'] else: continue if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'],", "for allele in mhcallele2.split(',')} for line in res: if 'PEPLIST' not in line", "Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have", "in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env", "float(out.Affinity) < 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def", "writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out = TsvRecord() out.HLA_allele", "if r.effect_type != 'Substitution': # I don't know how to get the wildtype", "# extract expression from VCF file vcf = VCF(infile) gxfile = txfile =", "= gx.split(',') for gx in gxs: gene, expr = gx.split('|', 1) csq =", "True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan,", "install' first or \" \"specify 'params.download_reference_genome_data = True'. 
\" \"If you have it", "for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps))", "= variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs: gene, expr = gx.split('|',", "not in line or line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer", "= {{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id |", "peptide = r[1] for i, hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla]", "allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary)", "# 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons',", "gff in Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] ==", "v = True, inptype = 1, f = wildfile, _prefix = '-', _iter", "'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide", "# tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi", "in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id not in allpos:", "line in fin: if '<pos>' not in line: fout.write(line) else: feature_id = line.split('\\t',", "- - 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames", "if gxfile or txfile: allpos = {} for gff in Gff(refall): if gff['type']", "csq 
in csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6]", "if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code = '-'", "1, f = wildfile, _prefix = '-', xls = True, xlsfile = xlsfile)", "# make sure those mhc-predictors are in PATH PATHs = set() for mhcpred", "= '-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status =", "',') for r in tpreader: if r.effect_type != 'Substitution': # I don't know", "continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue", "out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele,", "'-' r.locus = '<pos>' r.length = '-' r.coverage = '-' r.FPKM = expr", "= '<pos>' r.length = '-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo =", "vcf: # try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs =", "= csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus = '<pos>' r.length =", "gff import Gff from diot import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils", "outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}} netmhc = {{args.netmhc", "try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] + ':' +", "hla] = float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide',", "Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent", "'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: #", "g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 
g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'):", "\\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\", "= wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange =", "try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs", "r.locus = '<pos>' r.length = '-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo", "netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele for allele", "\" \"If you have it installed somewhere else, make a symbolic link to", "if float(out.Affinity) < 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out)", "{{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}}", "= mhcallele2, v = True, inptype = 1, f = wildfile, _prefix =", "= Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding", "'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment =", "= TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id =", "writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to", "for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity", "quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome =", "= infile else: params.maf = infile alleles = [allele.replace('*', '') for allele in", "'-') if 
r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity =", "mhcallele2, v = True, inptype = 1, f = wildfile, _prefix = '-',", "r.length = '-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi", "xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output", "extract expression from VCF file vcf = VCF(infile) gxfile = txfile = False", "continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele',", "annotation data for the genome # topiary will use it to annotate the", "35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*')", "1) csq = [csq for csq in csqs if f'|{gene}|' in csq][0].split('|') r", "fin, open(gxfile2, 'w') as fout: for line in fin: if '<pos>' not in", "= [csq for csq in csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord()", "if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2", "m.group(2): continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' +", "= {{args.genome | quote}} params = {{args.params | repr}} refall = {{args.refall |", "netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH", "0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C',", "writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params", "smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome", "'GRCh37', 'hg38': 
'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome,", "PATH PATHs = set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec):", "= parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST',", "re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if", "a symbolic link to {}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps else", "= 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'):", "'') for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] =", "runner = { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons'", "= expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id)", "True) res = shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {} for allele", "for line in res: if 'PEPLIST' not in line or line.startswith('Protein'): continue parts", "from cyvcf2 import VCF from gff import Gff from diot import Diot from", "nparams = Diot( a = mhcallele2, v = True, inptype = 1, f", "and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract expression from VCF file", "'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment", "wildpeps = set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit", "os import path%} infile = {{i.infile | quote}} afile = {{i.afile | ?path.isfile", "we have downloaded annotation data for the genome # topiary will use it", "environ from pathlib import Path from cyvcf2 import 
VCF from gff import Gff", "wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity)", "= variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for", "quote}} smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor =", "has to be annotated with by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name", "== 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else:", "in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide", "+ r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else:", "with by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage", "= TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind()", "| quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome | quote}} params", "= {{args.topiary | quote}} netmhc = {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan |", "{{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) # check if we", "[allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile,", "coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887", "'', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome]", "gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs = 
variant.INFO['CSQ'].split(',') gxs = gx.split(',')", "= {{i.infile | quote}} afile = {{i.afile | ?path.isfile | =readlines | !alwaysList", "don't have annotation data for genome {}{} installed. \" \"Either you run 'pyensembl", "wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect", "(KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs:", "= [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if allele] reader =", "PATHs = set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try:", "{{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}}", "pass runner = { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan,", "open(txfile2, 'w') as fout: for line in fin: if '<pos>' not in line:", "m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc():", "substitution continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m:", "'Substitution': # I don't know how to get the wildtype peptides if it", "position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if", "'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in", "from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os import path%} infile =", "True, BA = True, inptype = 1, f = wildfile, _prefix = '-',", "in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {}", "if not datadir.joinpath(genome).is_dir() and not 
datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data", "gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as fout: for line", "= Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}} netmhc = {{args.netmhc |", "allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings =", "from gff import Gff from diot import Diot from cmdy import CmdyReturnCodeException from", "= '-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise", "not in allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>',", "expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close()", "ID core icore 1-log50k nM Rank core icore 1-log50k nM Rank 0 LYLPALWFH", "r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus = '<pos>' r.length", "genome = gmaps[genome] # extract expression from VCF file vcf = VCF(infile) gxfile", "mhc_predictor # make sure those mhc-predictors are in PATH PATHs = set() for", "raise ValueError('VCF file has to be annotated with by VEP') # tracking_id class_code", "Gff from diot import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2", "downloaded annotation data for the genome # topiary will use it to annotate", "1)[0] if feature_id not in allpos: logger.warning('Cannot find position information for: %s, skipping',", "_prefix = '-', _iter = True, _debug = True) res = shell.netmhc(**nparams) pos_hit", "hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla] = float(r[7 + i*5]) writer", "def run_smm(): pass def run_smm_pmbec(): pass runner = { 'netmhc' : run_netmhc, 'netmhcpan'", "= 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile", 
"'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for", "--mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file", "information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile:", "have annotation data for genome {}{} installed. \" \"Either you run 'pyensembl install'", "chr1:62947-63887 - - 0 0 0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1", "Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}} netmhc = {{args.netmhc | quote}}", "gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w')", "outfile = Path({{o.outfile | quote}}) outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary", "wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG',", "0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm',", "mhcallele2.split(',')} for line in res: if 'PEPLIST' not in line or line.startswith('Protein'): continue", "'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation", "wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc =", "= r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>' in 
out.Ref_affinity or float(out.Ref_affinity)", "= '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan():", "generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore", "1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805", "feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with", "r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide", "/ 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot( a =", "ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos')", "False features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has", "VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo", "infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile alleles = [allele.replace('*', '') for", "(KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs:", "txfile = False features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF", "allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt'", "= TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage',", "tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-')", "TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 
'Mutation', 'AAChange']", "bioprocs.utils import shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {%", "/ 'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v = True, BA =", "['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status']", "for allele in f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings", "= True) # check if we have downloaded annotation data for the genome", "= {} for r in reader: peptide = r[1] for i, hla in", "raise RuntimeError(\"You don't have annotation data for genome {}{} installed. \" \"Either you", "\" \"Either you run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data = True'.", "pathlib import Path from cyvcf2 import VCF from gff import Gff from diot", "= 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'):", "'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise", "+ i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead()", "v = True, BA = True, inptype = 1, f = wildfile, _prefix", "smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] + ':'", "= environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression", "= True, inptype = 1, f = wildfile, _prefix 
= '-', _iter =", "re from os import environ from pathlib import Path from cyvcf2 import VCF", "writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length',", "set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated", "Path from cyvcf2 import VCF from gff import Gff from diot import Diot", "or \" \"specify 'params.download_reference_genome_data = True'. \" \"If you have it installed somewhere", "= r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep = wildpep[:index]", "writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader:", "set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except", "with open(gxfile) as fin, open(gxfile2, 'w') as fout: for line in fin: if", "= '-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi =", "datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract expression from VCF", "= line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot find position information for:", "skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] =", "| quote}} genome = {{args.genome | quote}} params = {{args.params | repr}} refall", "= outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v =", "= line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity',", "= wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') 
if r.wildaffinity != '-': r.deltaaffinity =", "information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] =", ">= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles'", "\\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\", "import Path from cyvcf2 import VCF from gff import Gff from diot import", "r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec():", "feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue if", "# try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|')", "wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep)", "from VCF file vcf = VCF(infile) gxfile = txfile = False features =", "elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature not in", "VCF file vcf = VCF(infile) gxfile = txfile = False features = set()", "writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec(): pass", "netmhc = {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan", "params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors are in PATH PATHs =", "writer.close() if gxfile or txfile: allpos = {} for gff in Gff(refall): if", "nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele,", "not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file not generated.\") # read", "if vcf.contains('TX'): if not 
vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with", "# ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0", "else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r)", "= 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos = {} for", "- ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK gxfile =", "is not a substitution continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect)", "1.5 \\ --output-csv epitopes.csv \"\"\" import re from os import environ from pathlib", "[csq for csq in csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id", "r.effect_type != 'Substitution': # I don't know how to get the wildtype peptides", "TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4]", "wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile =", "have downloaded annotation data for the genome # topiary will use it to", "mhcallele2, v = True, BA = True, inptype = 1, f = wildfile,", "(netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env =", "= variant.format('GX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for", "infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile alleles = [allele.replace('*',", "0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id',", "fout: for line in fin: if '<pos>' not in line: fout.write(line) else: feature_id", "from diot import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as", 
"r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity,", "RuntimeError(\"You don't have annotation data for genome {}{} installed. \" \"Either you run", "'\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if", "| quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome", "netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}} smm = {{args.smm", "out.Peptide = r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST", "= TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile)", "= {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for r", "# if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract", "you run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data = True'. 
\" \"If", "somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0", "= tx.split('|', 1) csq = [csq for csq in csqs if f'|{transcript}|' in", "writerall.writeHead() tpreader.rewind() for r in tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide", "logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os import path%} infile", "Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix |", "params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if", "line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene',", "feature_id = line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot find position information", "{{args.netmhccons | quote}} smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}}", "{'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not", "float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity,", "'-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide,", "\\ --output-csv epitopes.csv \"\"\" import re from os import environ from pathlib import", "gxfile = gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2,", "LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST", "500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0", "= mutpeps.get(r.peptide + '\\t' + r.allele, '-') 
out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':',", "feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 =", "float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if", "chr1:53048-54936 - - 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile)", "= {{i.afile | ?path.isfile | =readlines | !alwaysList | repr}} outfile = Path({{o.outfile", "--vcf somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff", "res = shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {} for allele in", "| quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) # check if we have", "csq = [csq for csq in csqs if f'|{gene}|' in csq][0].split('|') r =", "= Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix", "!= '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele,", "else: feature_id = line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot find position", "= {{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome |", "= gx.split('|', 1) csq = [csq for csq in csqs if f'|{gene}|' in", "tx in txs: transcript, expr = tx.split('|', 1) csq = [csq for csq", "mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*',", "= wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '')", "run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec(): pass runner =", "if 'PEPLIST' not in line or line.startswith('Protein'): continue parts = 
line.split() wildbindings[parts[1]][parts[2]] =", "genome in gmaps else '', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir():", "TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out = TsvRecord()", "wildbindings = {allele: {} for allele in mhcallele2.split(',')} for line in res: if", "r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r)", "= [allele.replace('*', '') for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome", "6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301", "csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3]", "core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138", "= wildfile, _prefix = '-', _iter = True, _debug = True) res =", "variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs: gene, expr = gx.split('|', 1)", "= '###', delimit = ',') for r in tpreader: if r.effect_type != 'Substitution':", "{} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for r in", "def run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec(): pass runner = { 'netmhc'", "quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents = True)", "= int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep = wildpep[:index] + m.group(1) +", "in vcf: # try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs", "import TsvReader, TsvWriter, TsvRecord {% from os import path%} infile = {{i.infile |", "feature_id not in allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id) else:", "installed somewhere else, make a symbolic link to {}\".format(genome, ('/' 
+ gmaps[genome]) if", "genome = {{args.genome | quote}} params = {{args.params | repr}} refall = {{args.refall", "'mhc-alleles' in params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele )", "mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue", "shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file not generated.\")", "FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - -", "afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor", "somewhere else, make a symbolic link to {}\".format(genome, ('/' + gmaps[genome]) if genome", "are in PATH PATHs = set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons,", "if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent /", "netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH']", "for gff in Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type']", "with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in", "run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec(): pass runner = { 'netmhc' :", "cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as shell, logger from bioprocs.utils.tsvio2 import", ": run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm, 'smm-pmbec' :", "{{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}} smm = {{args.smm | 
quote}}", "in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {}", "HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k nM Rank core icore 1-log50k", "xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v =", "pass def run_smm_pmbec(): pass runner = { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan,", "chr1:53048-54936 - - 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile)", "effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep = r.peptide", "':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6", "+ ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity',", "'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead()", "= txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else:", "to be annotated with by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id", "if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps", "allpos: logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile", "1) csq = [csq for csq in 
csqs if f'|{transcript}|' in csq][0].split('|') r", "RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B',", "quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) # check if we have downloaded", "try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs =", "r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' +", "binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc',", "TsvWriter, TsvRecord {% from os import path%} infile = {{i.infile | quote}} afile", "the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if", "if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {} for r", "for i, hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla] = float(r[7 +", "- ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK txfile =", "not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for genome {}{} installed.", "outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as fout: for line in fin:", "{{args.genome | quote}} params = {{args.params | repr}} refall = {{args.refall | quote}}", "for genome {}{} installed. 
\" \"Either you run 'pyensembl install' first or \"", "HLA-A29:02 Pos Peptide ID core icore 1-log50k nM Rank core icore 1-log50k nM", "features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos = {} for gff in Gff(refall):", "writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo',", "you have it installed somewhere else, make a symbolic link to {}\".format(genome, ('/'", "txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as fout: for line", "params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype", "# 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps", "= ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in", "'>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500 and", "r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide", "Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as shell, logger from", "gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature = gff['attributes']['transcript_id']", "how to get the wildtype peptides if it is not a substitution continue", "= params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot(", "= gff['attributes']['transcript_id'] else: continue if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'],", 
"variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx", ": run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm, 'smm-pmbec' : run_smm_pmbec, } runner.get(mhc_predictor)()", "netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons", "a = mhcallele2, v = True, BA = True, inptype = 1, f", "be annotated with by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus", "tpreader.rewind() for r in tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide =", "parts[12] writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide',", "TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM',", "gff['type'] == 'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature not in features:", "= wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if", "allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile", "'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos = {} for gff", "use it to annotate the data gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir", "for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile", "'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead()", "topiary will use it to annotate the data gmaps = {'hg19': 'GRCh37', 
'hg38':", "'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for", "| quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}},", "{} for gff in Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif", "{}{} installed. \" \"Either you run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data", "= TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene =", "r.tss_id = '-' r.locus = '<pos>' r.length = '-' r.coverage = '-' r.FPKM", "0 OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id',", "nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137", "not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for", "core icore 1-log50k nM Rank core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST", "0 0 0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 -", "from bioprocs.utils import shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord", "'-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if r.wildaffinity != '-':", "fin: if '<pos>' not in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if", "\\ --vcf somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\", "gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as", "= False wildbindings = {allele: {} for allele in mhcallele2.split(',')} for line in", "wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = 
params['mhc-alleles'].replace(':', '').replace('*', '') wildfile", "= { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' :", "'-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed", "writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene']", "('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan)", "= TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out =", "0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or", "in tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity =", "= outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors are in PATH", "= topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile", "= outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot(", "if gff['type'] == 'gene': feature = gff['attributes']['gene_id'] elif gff['type'] == 'transcript': feature =", "in enumerate(alleles): wildbindings[peptide + '\\t' + hla] = float(r[7 + i*5]) writer =", "= float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity',", "= mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' +", "from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as shell, logger from bioprocs.utils.tsvio2", "class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage 
FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status #", "'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in", "f.readline().strip().split('\\t') if allele] reader = TsvReader(xlsfile, comment = '\\t\\t\\t') wildbindings = {} for", "allpos = {} for gff in Gff(refall): if gff['type'] == 'gene': feature =", "as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os import", "1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise", "for csq in csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id =", "'w') as fout: for line in fin: if '<pos>' not in line: fout.write(line)", "= shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {} for allele in mhcallele2.split(',')}", "f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id", "open(gxfile) as fin, open(gxfile2, 'w') as fout: for line in fin: if '<pos>'", "comment = '###', delimit = ',') for r in tpreader: if r.effect_type !=", "OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id',", "'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant", "with open(txfile) as fin, open(txfile2, 'w') as fout: for line in fin: if", "'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment", "continue params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add", "def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent", "pass def run_netmhccons(): pass 
def run_smm(): pass def run_smm_pmbec(): pass runner = {", "| quote}} afile = {{i.afile | ?path.isfile | =readlines | !alwaysList | repr}}", "= mhcallele2, v = True, BA = True, inptype = 1, f =", "in csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code", "\"\"\" ./topiary \\ --vcf somatic.vcf \\ --mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff", "out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange", "quote}} netmhccons = {{args.netmhccons | quote}} smm = {{args.smm | quote}} smm_pmbec =", "allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor']", "outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot( a", "\\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\", "csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx in gxs: gene, expr =", "don't know how to get the wildtype peptides if it is not a", "- chr1:53048-54936 - - 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer =", "xlsfile.open('r') as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t')", "import environ from pathlib import Path from cyvcf2 import VCF from gff import", "0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW", "fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot find", "txfile: allpos = {} for gff in Gff(refall): if gff['type'] == 'gene': feature", "r.wildaffinity, r.deltaaffinity, r.gene]) 
writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass", "txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if", "r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation =", "except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in", "inptype = 1, f = wildfile, _prefix = '-', _iter = True, _debug", "= outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id',", "{{i.afile | ?path.isfile | =readlines | !alwaysList | repr}} outfile = Path({{o.outfile |", "continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs: transcript, expr", "read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID core icore 1-log50k nM", "True, inptype = 1, f = wildfile, _prefix = '-', xls = True,", "'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide +", "out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out)", "params.maf = infile alleles = [allele.replace('*', '') for allele in afile] params['mhc-alleles'] =", "txs = tx.split(',') for tx in txs: transcript, expr = tx.split('|', 1) csq", "':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 
g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0", "| ?path.isfile | =readlines | !alwaysList | repr}} outfile = Path({{o.outfile | quote}})", "infile = {{i.infile | quote}} afile = {{i.afile | ?path.isfile | =readlines |", "= r[1] for i, hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla] =", "epitopes.csv \"\"\" import re from os import environ from pathlib import Path from", "run netmhcpan, output file not generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02", "12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f: alleles =", "gmaps[genome] # extract expression from VCF file vcf = VCF(infile) gxfile = txfile", "\"\"\" import re from os import environ from pathlib import Path from cyvcf2", "it installed somewhere else, make a symbolic link to {}\".format(genome, ('/' + gmaps[genome])", "= {'hg19': 'GRCh37', 'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and", "i, hla in enumerate(alleles): wildbindings[peptide + '\\t' + hla] = float(r[7 + i*5])", "wildpep[index] != m.group(2): continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide +", "for variant in vcf: # try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError):", "vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with by", "quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons =", "in vcf: # try..except try: tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs", "Diot( a = mhcallele2, v = True, BA = True, inptype = 1,", "- ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK # ENSG00000268020", "pos_hit = False wildbindings = {allele: {} for allele in mhcallele2.split(',')} for line", "writer = TsvWriter(gxfile) writer.cnames = 
['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length',", "= TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for r in tpreader: if", "tpreader: if r.effect_type != 'Substitution': # I don't know how to get the", "for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2", "comment = '\\t\\t\\t') wildbindings = {} for r in reader: peptide = r[1]", "gff['attributes']['transcript_id'] else: continue if feature not in features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end'])", "r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF", "line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id not in allpos: logger.warning('Cannot", "or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles']", "--ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression", "'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: gx = variant.format('GX')[0] except", "wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v", "in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4] r.class_code = '-' r.nearest_ref_id =", "csq in csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[4]", "in afile] params['mhc-alleles'] = ','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] =", "'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) 
writerall.cnames = writer.cnames", "= r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-')", "tx = variant.format('TX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',')", "'pyensembl install' first or \" \"specify 'params.download_reference_genome_data = True'. \" \"If you have", "else, make a symbolic link to {}\".format(genome, ('/' + gmaps[genome]) if genome in", "import CmdyReturnCodeException from bioprocs.utils import shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader,", "['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide',", "fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as", "out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>' in out.Ref_affinity or", "mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome | quote}} params = {{args.params", "if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract expression", "datadir.joinpath(gmaps.get(genome, genome)).is_dir(): raise RuntimeError(\"You don't have annotation data for genome {}{} installed. 
\"", "TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM',", "True) # check if we have downloaded annotation data for the genome #", "{{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}},", "repr}} refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([", "as fout: for line in fin: if '<pos>' not in line: fout.write(line) else:", "txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name',", "'\\t' + r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity", "def run_netmhcpan(): shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else", "genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv", "csqs = variant.INFO['CSQ'].split('|') txs = tx.split(',') for tx in txs: transcript, expr =", "--rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re from os", "run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent /", "= 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file", "= {allele: {} for allele in mhcallele2.split(',')} for line in res: if 'PEPLIST'", "= {{args.netmhcpan | quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons |", "- - 0 0 0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 -", "= ',') for r in tpreader: if r.effect_type != 'Substitution': # I don't", "if not 
xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file not generated.\") #", "'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm, 'smm-pmbec' : run_smm_pmbec, }", "+ wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc", "Path({{o.outfile | quote}}) outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary | quote}}", "TsvReader, TsvWriter, TsvRecord {% from os import path%} infile = {{i.infile | quote}}", "+ r.allele] = wildpep wildpeps.add(wildpep) def run_netmhc(): shell.load_config(netmhc = netmhc) mhcallele2 = params['mhc-alleles'].replace(':',", "as f: alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*') for allele in f.readline().strip().split('\\t') if", "r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos = {}", "icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH", "= {{args.mhc_predictor | quote}} genome = {{args.genome | quote}} params = {{args.params |", "\\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\"", "--percentile-cutoff 2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file", "gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - -", "for the genome # topiary will use it to annotate the data gmaps", "- chr1:53048-54936 - - 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer =", "# 
#,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan',", "= 1, f = wildfile, _prefix = '-', xls = True, xlsfile =", "csq[3] r.tss_id = '-' r.locus = '<pos>' r.length = '-' r.coverage = '-'", "\\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re from", "run_smm(): pass def run_smm_pmbec(): pass runner = { 'netmhc' : run_netmhc, 'netmhcpan' :", "+ m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep wildpeps.add(wildpep) def", "# try..except try: gx = variant.format('GX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',')", "in PATH PATHs = set() for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm,", "= outfile.parent / 'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v = True,", "== 'transcript': feature = gff['attributes']['transcript_id'] else: continue if feature not in features: continue", "icore 1-log50k nM Rank core icore 1-log50k nM Rank 0 LYLPALWFH PEPLIST LYLPALWFH", "= '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id =", "params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'):", "\\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import re from os 
import environ", "m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep = r.peptide index =", "{}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps else '', datadir)) # if", "{ 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons,", "'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has", "r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def", "tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361", "or txfile: allpos = {} for gff in Gff(refall): if gff['type'] == 'gene':", "nparams = Diot( a = mhcallele2, v = True, BA = True, inptype", "| quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok = True, parents = True) # check", "outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors are in PATH PATHs", "','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele ) wildfile = outfile.parent /", "r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id", "| =readlines | !alwaysList | repr}} outfile = Path({{o.outfile | quote}}) outdir =", ") wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams", "tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity = r.affinity", "= r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>' in", "open(gxfile2, 'w') as fout: for line in fin: if '<pos>' not in line:", "run_netmhcpan, 'netmhciipan' : run_netmhciipan, 'netmhccons' : run_netmhccons, 'smm' : run_smm, 'smm-pmbec' : run_smm_pmbec,", "in txs: transcript, 
expr = tx.split('|', 1) csq = [csq for csq in", "infile alleles = [allele.replace('*', '') for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome", "for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException:", "allele ) wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt'", "xlsfile.is_file(): raise RuntimeError(\"Failed to run netmhcpan, output file not generated.\") # read the", "'-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK'", "r in tpreader: out = TsvRecord() out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity", "= VCF(infile) gxfile = txfile = False features = set() if vcf.contains('GX'): if", "r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene])", "= ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi',", "= tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele',", "os import environ from pathlib import Path from cyvcf2 import VCF from gff", "= '-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus =", "file not generated.\") # read the output \"\"\" HLA-A24:02 HLA-A29:02 Pos Peptide ID", "# topiary will use it to annotate the data gmaps = {'hg19': 'GRCh37',", "r in reader: peptide = r[1] for i, hla in enumerate(alleles): wildbindings[peptide +", "in gxs: gene, expr = gx.split('|', 1) csq = [csq for csq in", "'\\t' + r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500')", "raise RuntimeError(\"Failed to run netmhcpan, 
output file not generated.\") # read the output", "# I don't know how to get the wildtype peptides if it is", "0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if", "'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames =", "or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf = infile alleles = [allele.replace('*', '')", "gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx') with open(txfile) as fin, open(txfile2, 'w') as", "in res: if 'PEPLIST' not in line or line.startswith('Protein'): continue parts = line.split()", "repr}} outfile = Path({{o.outfile | quote}}) outdir = Path({{o.outdir | quote}}) topiary =", "data for genome {}{} installed. \" \"Either you run 'pyensembl install' first or", "in fin: if '<pos>' not in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0]", "1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos =", "line in res: if 'PEPLIST' not in line or line.startswith('Protein'): continue parts =", "delimit = ',') for r in tpreader: if r.effect_type != 'Substitution': # I", "else '', datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome =", "0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231", "diot import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as shell,", "=readlines | !alwaysList | repr}} outfile = Path({{o.outfile | quote}}) outdir = Path({{o.outdir", "skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 = outfile.with_suffix('.tx')", "shell2 as shell, logger from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os", 
"mutpeps = {} tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',') for", "tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag | quote}},", "!= 'Substitution': # I don't know how to get the wildtype peptides if", "r.wildpeptide = mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t'", "by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM", "def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass def run_smm_pmbec(): pass runner", "%s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file'] = gxfile params['rna-transcript-fpkm-tracking-file']", "run_smm_pmbec(): pass runner = { 'netmhc' : run_netmhc, 'netmhcpan' : run_netmhcpan, 'netmhciipan' :", "'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'): wildpeps = set() mutpeps = {} tpreader =", "int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):]", "r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) < 500 and ('>' in out.Ref_affinity", "quote}}) topiary = {{args.topiary | quote}} netmhc = {{args.netmhc | quote}} netmhcpan =", "parents = True) # check if we have downloaded annotation data for the", "length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P -", "r in tpreader: if r.effect_type != 'Substitution': # I don't know how to", "r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if", "gxfile or txfile: allpos = {} for gff in Gff(refall): if gff['type'] ==", "RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\"", "in tpreader: if r.effect_type != 
'Substitution': # I don't know how to get", "PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs))", "if 'mhc-alleles' in params else ','.join( allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele", "gene, expr = gx.split('|', 1) csq = [csq for csq in csqs if", "= tx.split(',') for tx in txs: transcript, expr = tx.split('|', 1) csq =", "import path%} infile = {{i.infile | quote}} afile = {{i.afile | ?path.isfile |", "+ '\\t' + hla] = float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames =", "writerall = TsvWriter(outfile.with_suffix('.all.txt')) writerall.cnames = writer.cnames writerall.writeHead() tpreader.rewind() for r in tpreader: out", "path%} infile = {{i.infile | quote}} afile = {{i.afile | ?path.isfile | =readlines", "- 0 0 0 OK # ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936", "writerall.write(out) if float(out.Affinity) < 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000):", "if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be annotated with", "f = wildfile, _prefix = '-', _iter = True, _debug = True) res", "'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) xlsfile = outfile.parent / 'wildtype.binding.txt' nparams = Diot( a = mhcallele2,", "ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK # ENSG00000268020 -", "outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v = True,", "params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a", "position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) txfile = txfile2 params['rna-gene-fpkm-tracking-file']", "'-' nwriter.write([ r.allele, r.peptide, 
r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass", "| repr}} refall = {{args.refall | quote}} tmpdir = Path({{args.tmpdir | quote}}) /", "\" \"specify 'params.download_reference_genome_data = True'. \" \"If you have it installed somewhere else,", "quote}} netmhciipan = {{args.netmhciipan | quote}} netmhccons = {{args.netmhccons | quote}} smm =", "out.Gene = r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id wtpep = mutpeps.get(r.peptide +", "r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity", "- - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK #", "= netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps))", "if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as fout:", "in mhcallele2.split(',')} for line in res: if 'PEPLIST' not in line or line.startswith('Protein'):", "import Diot from cmdy import CmdyReturnCodeException from bioprocs.utils import shell2 as shell, logger", "[csq for csq in csqs if f'|{gene}|' in csq][0].split('|') r = TsvRecord() r.tracking_id", "annotation data for genome {}{} installed. 
\" \"Either you run 'pyensembl install' first", "tx.split('|', 1) csq = [csq for csq in csqs if f'|{transcript}|' in csq][0].split('|')", "m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue", "mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele for allele in", "for gx in gxs: gene, expr = gx.split('|', 1) csq = [csq for", "txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf", "netmhc) mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams", "2.0 \\ --mhc-epitope-lengths 8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking", "the wildtype peptides if it is not a substitution continue # parse effect:", "xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to", "not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] # extract expression from", "'hg38': 'GRCh38'} datadir = Path.home().joinpath('.cache', 'pyensembl') if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir():", "gx in gxs: gene, expr = gx.split('|', 1) csq = [csq for csq", "'params.download_reference_genome_data = True'. 
\" \"If you have it installed somewhere else, make a", "outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus',", "allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as fin,", "logger.warning('Cannot find position information for: %s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile =", "genome)).is_dir(): # genome = gmaps[genome] # extract expression from VCF file vcf =", "| repr}} outfile = Path({{o.outfile | quote}}) outdir = Path({{o.outdir | quote}}) topiary", "not in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id not in", "r.allele, '-') if r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity", "'-') out.Ref_peptide = wtpep out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant", "0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as f:", "= Path({{o.outfile | quote}}) outdir = Path({{o.outdir | quote}}) topiary = {{args.topiary |", "writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos = {} for gff in", "'\\t\\t\\t') wildbindings = {} for r in reader: peptide = r[1] for i,", "= Diot( a = mhcallele2, v = True, BA = True, inptype =", "FPKM_status # ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0", "smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH = environ['PATH'] +", "infile else: params.maf = infile alleles = [allele.replace('*', '') for allele in afile]", "link to {}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps else '', datadir))", "= wildpep[:index] + m.group(1) + 
wildpep[(index+1):] mutpeps[r.peptide + '\\t' + r.allele] = wildpep", "line or line.startswith('Protein'): continue parts = line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer = TsvWriter(outfile)", "AL627309.1 - chr1:53048-54936 - - 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer", "i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter", "--mhc-predictor netmhcpan \\ --mhc-alleles HLA-A*02:01,HLA-B*07:02 \\ --ic50-cutoff 500 \\ --percentile-cutoff 2.0 \\ --mhc-epitope-lengths", "| !alwaysList | repr}} outfile = Path({{o.outfile | quote}}) outdir = Path({{o.outdir |", "| quote}} netmhc = {{args.netmhc | quote}} netmhcpan = {{args.netmhcpan | quote}} netmhciipan", "a substitution continue # parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not", "r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele, '-') if r.wildaffinity !=", "wildbindings = {} for r in reader: peptide = r[1] for i, hla", "!= m.group(2): continue wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):] mutpeps[r.peptide + '\\t'", "?path.isfile | =readlines | !alwaysList | repr}} outfile = Path({{o.outfile | quote}}) outdir", "{} for r in reader: peptide = r[1] for i, hla in enumerate(alleles):", "genome {}{} installed. \" \"Either you run 'pyensembl install' first or \" \"specify", "= {{args.netmhccons | quote}} smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec |", "wildtype peptides if it is not a substitution continue # parse effect: p.N84S", "LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0", "True'. 
\" \"If you have it installed somewhere else, make a symbolic link", "variant.format('GX')[0] except (KeyError, TypeError): continue csqs = variant.INFO['CSQ'].split(',') gxs = gx.split(',') for gx", "LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231 0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392", "continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep", "'').replace('*', '')].get(wtpep, '>500') out.Mutation = r.variant out.AAChange = r.effect writerall.write(out) if float(out.Affinity) <", "in csqs if f'|{transcript}|' in csq][0].split('|') r = TsvRecord() r.tracking_id = csq[6] r.class_code", "outfile.parent / 'wildtype.binding.txt' nparams = Diot( a = mhcallele2, v = True, BA", "wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide = wtpep out.Ref_affinity =", "not m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2):", "| quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}]) tmpdir.mkdir(exist_ok =", "= outfile.with_suffix('.gx') with open(gxfile) as fin, open(gxfile2, 'w') as fout: for line in", "r.peptide, r.affinity, r.wildpeptide, r.wildaffinity, r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass", "from pathlib import Path from cyvcf2 import VCF from gff import Gff from", "r.effect) if not m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index]", "r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] != m.group(2): continue wildpep = wildpep[:index] +", "'') wildfile = outfile.parent / 'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2,", "as fin, open(gxfile2, 'w') as fout: for line in fin: if '<pos>' not", "{% from os import path%} infile = {{i.infile | quote}} afile = {{i.afile", "in line or line.startswith('Protein'): continue parts = 
line.split() wildbindings[parts[1]][parts[2]] = parts[12] writer =", "RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206 \"\"\" with xlsfile.open('r') as", "features = set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to", "r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG = r.gene_id out.ENST = r.transcript_id", "gxs = gx.split(',') for gx in gxs: gene, expr = gx.split('|', 1) csq", "file vcf = VCF(infile) gxfile = txfile = False features = set() if", "= 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if gxfile or txfile: allpos", "environ['PATH'] + ':' + ':'.join(PATHs)) shell.fg.topiary(**params) # add wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression #", "'wildtype.peptides.txt' wildfile.write_text('\\n'.join(wildpeps)) nparams = Diot( a = mhcallele2, v = True, inptype =", "\\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\", "= True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file(): raise RuntimeError(\"Failed to run", "annotated with by VEP') # tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length", "open(txfile) as fin, open(txfile2, 'w') as fout: for line in fin: if '<pos>'", "writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead()", "features: continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile)", "first or \" \"specify 
'params.download_reference_genome_data = True'. \" \"If you have it installed", "make a symbolic link to {}\".format(genome, ('/' + gmaps[genome]) if genome in gmaps", "'-' r.gene_id = csq[4] r.gene_short_name = csq[3] r.tss_id = '-' r.locus = '<pos>'", "writer = TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity',", "params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors are in", "for r in tpreader: if r.effect_type != 'Substitution': # I don't know how", "= csq[3] r.tss_id = '-' r.locus = '<pos>' r.length = '-' r.coverage =", "'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: gx =", "alleles = [allele.replace('*', '') for allele in afile] params['mhc-alleles'] = ','.join(alleles) params.genome =", "float(r[7 + i*5]) writer = TsvWriter(outfile) writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity']", "r = TsvRecord() r.tracking_id = csq[6] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id", "| quote}} smm = {{args.smm | quote}} smm_pmbec = {{args.smm_pmbec | quote}} mhc_predictor", "netmhciipan, netmhccons, smm, smm_pmbec): try: PATHs.add(str(Path(shell.which(mhcpred)).parent)) except CmdyReturnCodeException: continue params._env = Diot(PATH =", "| quote}} netmhccons = {{args.netmhccons | quote}} smm = {{args.smm | quote}} smm_pmbec", "= True, BA = True, inptype = 1, f = wildfile, _prefix =", "params.vcf = infile else: params.maf = infile alleles = [allele.replace('*', '') for allele", "_debug = True) res = shell.netmhc(**nparams) pos_hit = False wildbindings = {allele: {}", "wildfile, _prefix = '-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not", "= csq[4] r.class_code = '-' r.nearest_ref_id = '-' r.gene_id = csq[4] r.gene_short_name =", "'wildaffinity', 'deltaaffinity'] writer.writeHead() nwriter = 
TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity',", "check if we have downloaded annotation data for the genome # topiary will", "r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo = 0 r.FPKM_conf_hi = 1000 r.FPKM_status", "continue allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end']) if gxfile: gxfile2 = outfile.with_suffix('.gx') with open(gxfile) as", "--rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5 \\ --output-csv epitopes.csv \"\"\" import", "OK gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id',", "expression from VCF file vcf = VCF(infile) gxfile = txfile = False features", "parse effect: p.N84S m = re.match(r'^p\\.([A-Z])\\d+([A-Z])$', r.effect) if not m: continue wildpep =", "quote}} genome = {{args.genome | quote}} params = {{args.params | repr}} refall =", "datadir)) # if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir(): # genome = gmaps[genome] #", "= set() if vcf.contains('GX'): if not vcf.contains('CSQ'): raise ValueError('VCF file has to be", "r.deltaaffinity, r.gene]) writer.write(r) def run_netmhciipan(): pass def run_netmhccons(): pass def run_smm(): pass def", "AL627309.1 - chr1:53048-54936 - - 0 0 0 OK gxfile = outfile.with_suffix('.gx_nopos') writer", "in reader: peptide = r[1] for i, hla in enumerate(alleles): wildbindings[peptide + '\\t'", "r.FPKM_conf_hi = 1000 r.FPKM_status = 'OK' writer.write(r) features.add(r.tracking_id) writer.close() if vcf.contains('TX'): if not", "- 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos') writer = TsvWriter(txfile) writer.cnames =", "= 1, f = wildfile, _prefix = '-', _iter = True, _debug =", "else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 = 
outfile.with_suffix('.tx') with open(txfile)", "writer.writeHead() for variant in vcf: # try..except try: tx = variant.format('TX')[0] except (KeyError,", "= ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall", "gxfile = outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name',", "'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange'] writer.writeHead() writerall = TsvWriter(outfile.with_suffix('.all.txt'))", "r.wildaffinity != '-': r.deltaaffinity = float(r.affinity) - r.wildaffinity else: r.deltaaffinity = '-' nwriter.write([", "'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide", "'<pos>' r.length = '-' r.coverage = '-' r.FPKM = expr r.FPKM_conf_lo = 0", "'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except", "ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK txfile = outfile.with_suffix('.tx_nopos')", "if not m: continue wildpep = r.peptide index = int(r.mutation_start_in_peptide) if wildpep[index] !=", "8-11 \\ --rna-gene-fpkm-tracking-file genes.fpkm_tracking \\ --rna-min-gene-expression 4.0 \\ --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \\ --rna-min-transcript-expression 1.5", "gxfile params['rna-transcript-fpkm-tracking-file'] = txfile shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf =", "mutpeps.get(r.peptide + '\\t' + r.allele, '-') r.wildaffinity = wildbindings.get(r.wildpeptide + '\\t' + r.allele,", "quote}} params = {{args.params | repr}} refall = {{args.refall | quote}} tmpdir =", "= True'. 
\" \"If you have it installed somewhere else, make a symbolic", "= '\\t\\t\\t') wildbindings = {} for r in reader: peptide = r[1] for", "= netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele for", "= TsvWriter(txfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage',", "bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord {% from os import path%} infile = {{i.infile", "ValueError('VCF file has to be annotated with by VEP') # tracking_id class_code nearest_ref_id", "= TsvWriter(outfile) writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation',", "out.HLA_allele = r.allele out.Peptide = r.peptide out.Affinity = r.affinity out.Gene = r.gene out.ENSG", "expr = gx.split('|', 1) csq = [csq for csq in csqs if f'|{gene}|'", "wildfile, _prefix = '-', _iter = True, _debug = True) res = shell.netmhc(**nparams)", "transcript, expr = tx.split('|', 1) csq = [csq for csq in csqs if", "know how to get the wildtype peptides if it is not a substitution", "shell.load_config(topiary = topiary) if infile.endswith('.vcf') or infile.endswith('.vcf.gz'): params.vcf = infile else: params.maf =", "_prefix = '-', xls = True, xlsfile = xlsfile) shell.fg.netmhcpan(**nparams) if not xlsfile.is_file():", "quote}} tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([ {{proc.id | quote}}, {{proc.tag |", "= True, _debug = True) res = shell.netmhc(**nparams) pos_hit = False wildbindings =", "','.join(alleles) params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure", "#,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 
g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan',", "'delta_affinity', 'gene'] nwriter.writeHead() tpreader.rewind() for r in tpreader: r.wildpeptide = mutpeps.get(r.peptide + '\\t'", "from os import path%} infile = {{i.infile | quote}} afile = {{i.afile |", "'deltaaffinity'] writer.writeHead() nwriter = TsvWriter(neatfile) nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity',", "= r.transcript_id wtpep = mutpeps.get(r.peptide + '\\t' + r.allele, '-') out.Ref_peptide = wtpep", "= {} for gff in Gff(refall): if gff['type'] == 'gene': feature = gff['attributes']['gene_id']", "shell.load_config(netmhcpan = netmhcpan) mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join( allele", "wildtype binding # #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression # 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0 # 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0 if mhc_predictor in", "outfile.with_suffix('.gx_nopos') writer = TsvWriter(gxfile) writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus',", "'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: tx =", "TsvReader(xlsfile, comment = 
'\\t\\t\\t') wildbindings = {} for r in reader: peptide =", "{{args.smm_pmbec | quote}} mhc_predictor = {{args.mhc_predictor | quote}} genome = {{args.genome | quote}}", "params.genome = genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those", "genome params['output-csv'] = outfile.with_suffix('.nowt') params['mhc-predictor'] = mhc_predictor # make sure those mhc-predictors are", "and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000): writer.write(out) def run_netmhcpan(): shell.load_config(netmhcpan =", "\"Either you run 'pyensembl install' first or \" \"specify 'params.download_reference_genome_data = True'. \"", "if '<pos>' not in line: fout.write(line) else: feature_id = line.split('\\t', 1)[0] if feature_id", "%s, skipping', feature_id) else: fout.write(line.replace('<pos>', allpos[feature_id])) gxfile = gxfile2 if txfile: txfile2 =", "'FPKM_conf_hi', 'FPKM_status'] writer.writeHead() for variant in vcf: # try..except try: gx = variant.format('GX')[0]", "allele in mhcallele2.split(',')} for line in res: if 'PEPLIST' not in line or", "get the wildtype peptides if it is not a substitution continue # parse" ]
[ "import include_docs_urls from auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo", "from django.conf.urls import url, include from django.contrib import admin from rest_framework.documentation import include_docs_urls", "views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')),", "admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls', namespace='users')),", "= [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root),", "url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls',", "import url, include from django.contrib import admin from rest_framework.documentation import include_docs_urls from auth_api", "[ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^',", "include_docs_urls from auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API',", "import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for", "import admin from rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns = [", "django.contrib import admin from rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns =", "urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$',", "url, include from django.contrib 
import admin from rest_framework.documentation import include_docs_urls from auth_api import", "description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls', namespace='users')), url(r'^', include('todos.urls', namespace='todos')), ]", "url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls', namespace='users')), url(r'^',", "admin from rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns = [ url(r'^admin/',", "include_docs_urls(title='Todo API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls', namespace='users')), url(r'^', include('todos.urls',", "from django.contrib import admin from rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns", "include from django.contrib import admin from rest_framework.documentation import include_docs_urls from auth_api import views", "auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API", "from rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls),", "django.conf.urls import url, include from django.contrib import admin from rest_framework.documentation import include_docs_urls from", "rest_framework.documentation import include_docs_urls from auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/',", "API', description='RESTful API for Todo')), url(r'^$', views.api_root), url(r'^', include('users.urls', namespace='users')), url(r'^', include('todos.urls', namespace='todos')),", "from auth_api import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful" ]
[ "get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return: \"\"\" pool = redis.ConnectionPool(host=host,", "super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port:", "= args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return: \"\"\"", "import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex", "__init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param", "BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex =", "*args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param", "from werkzeug.routing import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args):", "\"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost',", "redis from werkzeug.routing import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map,", "def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return: \"\"\" pool =", "正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379):", "werkzeug.routing import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map)", "args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return: \"\"\" pool", "\"\"\" 获取redis操作对象 :param host: :param 
port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True)", "host: :param port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True) redis_con = redis.Redis(connection_pool=pool)", "RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def", "port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True) redis_con = redis.Redis(connection_pool=pool) return redis_con", "url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host:", "\"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\"", ":param port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True) redis_con = redis.Redis(connection_pool=pool) return", "class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0]", "port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port,", "获取redis操作对象 :param host: :param port: :return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True) redis_con", "import redis from werkzeug.routing import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def __init__(self,", "<filename>hot_crawler/utils.py import redis from werkzeug.routing import BaseConverter class RegexConverter(BaseConverter): \"\"\" 正则匹配路由 \"\"\" def", "def __init__(self, url_map, *args): super().__init__(url_map) self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象", "self.regex = args[0] def get_redis(host='localhost', port=6379): \"\"\" 获取redis操作对象 :param host: :param port: :return:", ":param host: :param port: 
:return: \"\"\" pool = redis.ConnectionPool(host=host, port=port, decode_responses=True) redis_con =" ]
[ "== [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value", "unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\":", "\"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert", "assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\":", "@patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def", "\"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with( \"Authorization\", \"GenieKey OPSGENIE_KEY\"", "@patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert", "( '{\"data\": 
{\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\":", "== '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with( \"Authorization\", \"GenieKey OPSGENIE_KEY\" ) urlopen_mock.assert_called_once_with(request_mock.return_value)", "== [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\":", "@patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' )", "[{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) ==", "\"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\")", "def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") ==", "[\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value =", "test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert 
opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock):", "{\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} )", "'{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}", "patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}'", "test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"]", "opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' )", "{\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with(", "assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def", "{\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert 
opsgenie.get_on_call_users(\"test_schedule\")", "integrations import opsgenie from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value", "test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request(", ") assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\")", "\"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == []", "urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\":", "\"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\")", "\"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\")", "<filename>app/tests/intergrations/test_opsgenie.py<gh_stars>0 from integrations import opsgenie from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") 
@patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def", "import opsgenie from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value =", "\"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with( \"Authorization\", \"GenieKey", "\"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, )", "opsgenie from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = (", "= ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\",", "{\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\":", "( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}'", "from integrations import opsgenie from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def 
test_get_on_call_users(api_get_request_mock):", "\"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") ==", "def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert (", "\"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\":", ") assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\":", "import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\":", "[{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"},", "@patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' )", "opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\":", "api_get_request_mock.assert_called_once_with( 
\"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\"", "( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\":", "api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value", "= ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\",", "@patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert", "from unittest.mock import patch @patch(\"integrations.opsgenie.api_get_request\") @patch(\"integrations.opsgenie.OPSGENIE_KEY\", \"OPSGENIE_KEY\") def test_get_on_call_users(api_get_request_mock): api_get_request_mock.return_value = ( '{\"data\":", "[] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}'", "assert ( opsgenie.api_get_request( \"test_url\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\":", ") == '{\"data\": {\"onCallParticipants\": [{\"name\": 
\"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with( \"Authorization\", \"GenieKey OPSGENIE_KEY\" )", "\"GenieKey\", \"token\": \"OPSGENIE_KEY\"} ) == '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) request_mock.assert_called_once_with(\"test_url\") request_mock.return_value.add_header.assert_called_once_with( \"Authorization\",", "= \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value =", ") @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\")", "\"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock, request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = (", "api_get_request_mock.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with(", "def test_get_on_call_users_with_exception(api_get_request_mock): api_get_request_mock.return_value = \"{]\" assert opsgenie.get_on_call_users(\"test_schedule\") == [] @patch(\"integrations.opsgenie.Request\") @patch(\"integrations.opsgenie.urlopen\") def test_api_get_request(urlopen_mock,", "request_mock): urlopen_mock.return_value.read.return_value.decode.return_value = ( '{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert ( 
opsgenie.api_get_request( \"test_url\",", "opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\", \"token\": \"OPSGENIE_KEY\"}, ) @patch(\"integrations.opsgenie.api_get_request\") def test_get_on_call_users_with_exception(api_get_request_mock):", "'{\"data\": {\"onCallParticipants\": [{\"name\": \"test_user\"}]}}' ) assert opsgenie.get_on_call_users(\"test_schedule\") == [\"test_user\"] api_get_request_mock.assert_called_once_with( \"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls\", {\"name\": \"GenieKey\"," ]
[ "super(NonKatakanaError, self).__init__() self.char = char self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted", "__init__(self, sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word def __str__(self):", "for word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when", "class UnidentifiedJapaneseText(Exception): def __init__(self, sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word =", "error for string: \\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty", "def __str__(self): return \"Empty string passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance):", "when a katakana string cannot be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError,", "utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance = utterance def __str__(self): return (u\"Wrongly", "in dictionary for word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception):", "UnidentifiedJapaneseText(Exception): def __init__(self, sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word", "self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string passed in\" class NonKatakanaError(Exception): def", "ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be parsed correctly \"\"\" def __init__(self,", "for string: \\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string", "self.char = char self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted character '%s'", "char self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted character '%s' as kana", "'%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class 
ChunkingError(Exception): \"\"\"Raised when a katakana", "def __init__(self, sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word def", "sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word def __str__(self): return", "__str__(self): return u\"Chunking error for string: \\n %s\" % self.textStr class EmptyStrError(Exception): def", "def __str__(self): return (u\"No match in dictionary for word '%s' in sentence: \\n'%s'\"", "word def __str__(self): return (u\"No match in dictionary for word '%s' in sentence:", "correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return", "= char self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted character '%s' as", "passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char = char", "class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be parsed correctly \"\"\" def", "self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted character '%s' as kana in", "self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be parsed correctly \"\"\"", "self.word = word def __str__(self): return (u\"No match in dictionary for word '%s'", "katakana string cannot be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr", "cannot be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt", "self.sentence = sentence self.word = word def __str__(self): return (u\"No match in dictionary", "return u\"Chunking error for string: \\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self):", "in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, 
self).__init__() self.char = char self.utterance", "= sentence self.word = word def __str__(self): return (u\"No match in dictionary for", "<reponame>iory/japanese2phoneme<filename>japanese2phoneme/exceptions.py<gh_stars>0 class UnidentifiedJapaneseText(Exception): def __init__(self, sentence, word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word", "\"\"\"Raised when a katakana string cannot be parsed correctly \"\"\" def __init__(self, txt):", "utterance def __str__(self): return (u\"Wrongly interpreted character '%s' as kana in utterance:\\n%s\" %", "self.textStr = txt def __str__(self): return u\"Chunking error for string: \\n %s\" %", "def __str__(self): return (u\"Wrongly interpreted character '%s' as kana in utterance:\\n%s\" % (self.char,", "self).__init__() self.char = char self.utterance = utterance def __str__(self): return (u\"Wrongly interpreted character", "self).__init__() self.sentence = sentence self.word = word def __str__(self): return (u\"No match in", "sentence self.word = word def __str__(self): return (u\"No match in dictionary for word", "a katakana string cannot be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__()", "= utterance def __str__(self): return (u\"Wrongly interpreted character '%s' as kana in utterance:\\n%s\"", "super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word def __str__(self): return (u\"No match", "string passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char =", "def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance = utterance def", "be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def", "%s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string passed in\" class", "\"Empty string 
passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char", "__str__(self): return (u\"Wrongly interpreted character '%s' as kana in utterance:\\n%s\" % (self.char, self.utterance))", "\\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string passed in\"", "= txt def __str__(self): return u\"Chunking error for string: \\n %s\" % self.textStr", "sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot", "def __str__(self): return u\"Chunking error for string: \\n %s\" % self.textStr class EmptyStrError(Exception):", "word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a", "__str__(self): return (u\"No match in dictionary for word '%s' in sentence: \\n'%s'\" %", "return (u\"No match in dictionary for word '%s' in sentence: \\n'%s'\" % (self.word,", "string: \\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string passed", "% self.textStr class EmptyStrError(Exception): def __str__(self): return \"Empty string passed in\" class NonKatakanaError(Exception):", "(self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be parsed correctly", "return \"Empty string passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__()", "= word def __str__(self): return (u\"No match in dictionary for word '%s' in", "self).__init__() self.textStr = txt def __str__(self): return u\"Chunking error for string: \\n %s\"", "(u\"No match in dictionary for word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence))", "__init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return u\"Chunking error for", "EmptyStrError(Exception): def __str__(self): return 
\"Empty string passed in\" class NonKatakanaError(Exception): def __init__(self, char,", "def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return u\"Chunking error", "% (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be parsed", "in sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string", "class EmptyStrError(Exception): def __str__(self): return \"Empty string passed in\" class NonKatakanaError(Exception): def __init__(self,", "txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return u\"Chunking error for string:", "parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self):", "super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return u\"Chunking error for string: \\n", "txt def __str__(self): return u\"Chunking error for string: \\n %s\" % self.textStr class", "__init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance = utterance def __str__(self):", "match in dictionary for word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class", "\\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised when a katakana string cannot be", "__str__(self): return \"Empty string passed in\" class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError,", "class NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance =", "word): super(UnidentifiedJapaneseText, self).__init__() self.sentence = sentence self.word = word def __str__(self): return (u\"No", "NonKatakanaError(Exception): def __init__(self, char, utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance = 
utterance", "string cannot be parsed correctly \"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr =", "\"\"\" def __init__(self, txt): super(ChunkingError, self).__init__() self.textStr = txt def __str__(self): return u\"Chunking", "char, utterance): super(NonKatakanaError, self).__init__() self.char = char self.utterance = utterance def __str__(self): return", "dictionary for word '%s' in sentence: \\n'%s'\" % (self.word, self.sentence)) class ChunkingError(Exception): \"\"\"Raised", "u\"Chunking error for string: \\n %s\" % self.textStr class EmptyStrError(Exception): def __str__(self): return" ]
[ "for row in range(rows): line = '' for col in range(cols): if field[row][col]", "<gh_stars>1-10 import sys def get_neighbours(row, col, rows, cols): neighbours = [] for i", "for _ in range(rows)] for row in range(rows): for col, char in enumerate(file.readline()):", "1: line += '*' continue neighbours = get_neighbours(row, col, rows, cols) mines =", "-1 < col + j < cols: neighbours.append((row + i, col + j))", "+= str(mines) res.append(line + '\\n') return res def main(file): res = [] field_num", "range(cols)] for _ in range(rows)] for row in range(rows): for col, char in", "res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1 return res[0: -1] if", "0: continue elif -1 < row + i < rows and -1 <", "sum(field[r][c] for r, c in neighbours) line += str(mines) res.append(line + '\\n') return", "neighbours.append((row + i, col + j)) return neighbours def solve(field, rows, cols): res", "and j == 0: continue elif -1 < row + i < rows", "res def main(file): res = [] field_num = 1 while True: rows, cols", "_ in range(rows)] for row in range(rows): for col, char in enumerate(file.readline()): if", "+ j)) return neighbours def solve(field, rows, cols): res = [] for row", "field_num = 1 while True: rows, cols = [int(x) for x in file.readline().split()]", "= [] for row in range(rows): line = '' for col in range(cols):", "'\\n') return res def main(file): res = [] field_num = 1 while True:", "rows, cols)) res.append('\\n') field_num += 1 return res[0: -1] if __name__ == '__main__':", "-1 < row + i < rows and -1 < col + j", "cols = [int(x) for x in file.readline().split()] if rows == cols == 0:", "def main(file): res = [] field_num = 1 while True: rows, cols =", "== '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num +=", "= 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') 
field_num += 1 return res[0:", "rows, cols): res = [] for row in range(rows): line = '' for", "== 0: continue elif -1 < row + i < rows and -1", "in range(-1, 2): for j in range(-1, 2): if i == 0 and", "i, col + j)) return neighbours def solve(field, rows, cols): res = []", "neighbours) line += str(mines) res.append(line + '\\n') return res def main(file): res =", "row in range(rows): line = '' for col in range(cols): if field[row][col] ==", "res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1 return res[0: -1] if __name__ ==", "cols) mines = sum(field[r][c] for r, c in neighbours) line += str(mines) res.append(line", "return res def main(file): res = [] field_num = 1 while True: rows,", "res.append('\\n') field_num += 1 return res[0: -1] if __name__ == '__main__': print(''.join(main(sys.stdin)), end='')", "== 1: line += '*' continue neighbours = get_neighbours(row, col, rows, cols) mines", "neighbours def solve(field, rows, cols): res = [] for row in range(rows): line", "'*' continue neighbours = get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for r,", "i == 0 and j == 0: continue elif -1 < row +", "+ j < cols: neighbours.append((row + i, col + j)) return neighbours def", "2): if i == 0 and j == 0: continue elif -1 <", "cols: neighbours.append((row + i, col + j)) return neighbours def solve(field, rows, cols):", "neighbours = get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for r, c in", "file.readline().split()] if rows == cols == 0: break field = [[0 for _", "= [[0 for _ in range(cols)] for _ in range(rows)] for row in", "for x in file.readline().split()] if rows == cols == 0: break field =", "= [] field_num = 1 while True: rows, cols = [int(x) for x", "= sum(field[r][c] for r, c in neighbours) line += str(mines) res.append(line + '\\n')", "char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num", "col, rows, cols) mines = 
sum(field[r][c] for r, c in neighbours) line +=", "j)) return neighbours def solve(field, rows, cols): res = [] for row in", "j < cols: neighbours.append((row + i, col + j)) return neighbours def solve(field,", "for r, c in neighbours) line += str(mines) res.append(line + '\\n') return res", "for i in range(-1, 2): for j in range(-1, 2): if i ==", "for col in range(cols): if field[row][col] == 1: line += '*' continue neighbours", "solve(field, rows, cols): res = [] for row in range(rows): line = ''", "main(file): res = [] field_num = 1 while True: rows, cols = [int(x)", "char in enumerate(file.readline()): if char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field,", "in file.readline().split()] if rows == cols == 0: break field = [[0 for", "0 and j == 0: continue elif -1 < row + i <", "rows, cols) mines = sum(field[r][c] for r, c in neighbours) line += str(mines)", "'*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1", "[] field_num = 1 while True: rows, cols = [int(x) for x in", "def solve(field, rows, cols): res = [] for row in range(rows): line =", "line = '' for col in range(cols): if field[row][col] == 1: line +=", "range(rows): for col, char in enumerate(file.readline()): if char == '*': field[row][col] = 1", "in range(rows): line = '' for col in range(cols): if field[row][col] == 1:", "+= '*' continue neighbours = get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for", "field[row][col] == 1: line += '*' continue neighbours = get_neighbours(row, col, rows, cols)", "range(rows): line = '' for col in range(cols): if field[row][col] == 1: line", "j == 0: continue elif -1 < row + i < rows and", "in neighbours) line += str(mines) res.append(line + '\\n') return res def main(file): res", "def get_neighbours(row, col, rows, cols): neighbours = [] for i in range(-1, 2):", "cols)) res.append('\\n') field_num += 1 return 
res[0: -1] if __name__ == '__main__': print(''.join(main(sys.stdin)),", "col, rows, cols): neighbours = [] for i in range(-1, 2): for j", "[] for row in range(rows): line = '' for col in range(cols): if", "True: rows, cols = [int(x) for x in file.readline().split()] if rows == cols", "== 0: break field = [[0 for _ in range(cols)] for _ in", "in range(rows)] for row in range(rows): for col, char in enumerate(file.readline()): if char", "cols): neighbours = [] for i in range(-1, 2): for j in range(-1,", "+ i, col + j)) return neighbours def solve(field, rows, cols): res =", "in range(rows): for col, char in enumerate(file.readline()): if char == '*': field[row][col] =", "get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for r, c in neighbours) line", "enumerate(file.readline()): if char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols))", "continue neighbours = get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for r, c", "if char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n')", "< col + j < cols: neighbours.append((row + i, col + j)) return", "x in file.readline().split()] if rows == cols == 0: break field = [[0", "cols): res = [] for row in range(rows): line = '' for col", "col in range(cols): if field[row][col] == 1: line += '*' continue neighbours =", "line += '*' continue neighbours = get_neighbours(row, col, rows, cols) mines = sum(field[r][c]", "for row in range(rows): for col, char in enumerate(file.readline()): if char == '*':", "+ i < rows and -1 < col + j < cols: neighbours.append((row", "= 1 while True: rows, cols = [int(x) for x in file.readline().split()] if", "in range(-1, 2): if i == 0 and j == 0: continue elif", "if i == 0 and j == 0: continue elif -1 < row", "2): for j in range(-1, 2): if i == 0 and j ==", "in range(cols)] for _ in range(rows)] for row in range(rows): for col, char", "[] 
for i in range(-1, 2): for j in range(-1, 2): if i", "cols == 0: break field = [[0 for _ in range(cols)] for _", "0: break field = [[0 for _ in range(cols)] for _ in range(rows)]", "1 while True: rows, cols = [int(x) for x in file.readline().split()] if rows", "r, c in neighbours) line += str(mines) res.append(line + '\\n') return res def", "rows, cols): neighbours = [] for i in range(-1, 2): for j in", "c in neighbours) line += str(mines) res.append(line + '\\n') return res def main(file):", "str(mines) res.append(line + '\\n') return res def main(file): res = [] field_num =", "== cols == 0: break field = [[0 for _ in range(cols)] for", "get_neighbours(row, col, rows, cols): neighbours = [] for i in range(-1, 2): for", "for _ in range(cols)] for _ in range(rows)] for row in range(rows): for", "break field = [[0 for _ in range(cols)] for _ in range(rows)] for", "= '' for col in range(cols): if field[row][col] == 1: line += '*'", "neighbours = [] for i in range(-1, 2): for j in range(-1, 2):", "range(rows)] for row in range(rows): for col, char in enumerate(file.readline()): if char ==", "i in range(-1, 2): for j in range(-1, 2): if i == 0", "in range(cols): if field[row][col] == 1: line += '*' continue neighbours = get_neighbours(row,", "import sys def get_neighbours(row, col, rows, cols): neighbours = [] for i in", "and -1 < col + j < cols: neighbours.append((row + i, col +", "for col, char in enumerate(file.readline()): if char == '*': field[row][col] = 1 res.append('Field", "j in range(-1, 2): if i == 0 and j == 0: continue", "col + j)) return neighbours def solve(field, rows, cols): res = [] for", "if field[row][col] == 1: line += '*' continue neighbours = get_neighbours(row, col, rows,", "res = [] for row in range(rows): line = '' for col in", "[int(x) for x in file.readline().split()] if rows == cols == 0: break field", "field = [[0 for _ in range(cols)] for _ in range(rows)] for row", "col + j < cols: neighbours.append((row + i, col + j)) return neighbours", 
"= get_neighbours(row, col, rows, cols) mines = sum(field[r][c] for r, c in neighbours)", "+ '\\n') return res def main(file): res = [] field_num = 1 while", "[[0 for _ in range(cols)] for _ in range(rows)] for row in range(rows):", "return neighbours def solve(field, rows, cols): res = [] for row in range(rows):", "for j in range(-1, 2): if i == 0 and j == 0:", "row + i < rows and -1 < col + j < cols:", "< rows and -1 < col + j < cols: neighbours.append((row + i,", "rows and -1 < col + j < cols: neighbours.append((row + i, col", "_ in range(cols)] for _ in range(rows)] for row in range(rows): for col,", "rows, cols = [int(x) for x in file.readline().split()] if rows == cols ==", "i < rows and -1 < col + j < cols: neighbours.append((row +", "1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1 return res[0: -1]", "range(cols): if field[row][col] == 1: line += '*' continue neighbours = get_neighbours(row, col,", "mines = sum(field[r][c] for r, c in neighbours) line += str(mines) res.append(line +", "field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1 return", "rows == cols == 0: break field = [[0 for _ in range(cols)]", "while True: rows, cols = [int(x) for x in file.readline().split()] if rows ==", "< cols: neighbours.append((row + i, col + j)) return neighbours def solve(field, rows,", "#{}:\\n'.format(field_num)) res.extend(solve(field, rows, cols)) res.append('\\n') field_num += 1 return res[0: -1] if __name__", "'' for col in range(cols): if field[row][col] == 1: line += '*' continue", "elif -1 < row + i < rows and -1 < col +", "res = [] field_num = 1 while True: rows, cols = [int(x) for", "sys def get_neighbours(row, col, rows, cols): neighbours = [] for i in range(-1,", "range(-1, 2): for j in range(-1, 2): if i == 0 and j", "row in range(rows): for col, char in enumerate(file.readline()): if char == '*': 
field[row][col]", "= [int(x) for x in file.readline().split()] if rows == cols == 0: break", "in enumerate(file.readline()): if char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num)) res.extend(solve(field, rows,", "res.append(line + '\\n') return res def main(file): res = [] field_num = 1", "= [] for i in range(-1, 2): for j in range(-1, 2): if", "range(-1, 2): if i == 0 and j == 0: continue elif -1", "== 0 and j == 0: continue elif -1 < row + i", "continue elif -1 < row + i < rows and -1 < col", "< row + i < rows and -1 < col + j <", "col, char in enumerate(file.readline()): if char == '*': field[row][col] = 1 res.append('Field #{}:\\n'.format(field_num))", "line += str(mines) res.append(line + '\\n') return res def main(file): res = []", "if rows == cols == 0: break field = [[0 for _ in" ]
[ "2,row + 2),(0,0,255),2) for corner in centroids: row = corner[0] col = corner[1]", "cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value", "stats, centroids = cv2.connectedComponentsWithStats(dst) # define the criteria to stop and refine the", "dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was", "+ 2,row + 2),(0,0,255),2) for corner in centroids: row = corner[0] col =", "#gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape #print(height,", "#print(height, width) gray = gray_input img = img_input #crop_img = img[y:y+h, x:x+w] #gray", "corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners: row = corner[0]", "= np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int)", "+ cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res =", "define the criteria to stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS +", "them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners", "img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70]", "= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them", "corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,255,0),2) cv2.imwrite('subpixel5.png',img) return img", "+ 2),(0,0,255),2) for corner in centroids: row = corner[0] col = 
corner[1] cv2.rectangle(img,(col,", "= corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,255,0),2) cv2.imwrite('subpixel5.png',img) return", "[0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners: row", "np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for", "centroids = cv2.connectedComponentsWithStats(dst) # define the criteria to stop and refine the corners", "cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape", "img_input, row, col): height, width = gray_input.shape #print(height, width) gray = gray_input img", "= img_input[col:col+70, row:row+70] # find Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004)", "= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape #print(height, width)", "gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris corners gray = np.float32(gray)", "to stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)", "res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners =", "x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris corners", "# find centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define the criteria", "draw them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners)", "np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = 
[0,255,0] print(corners) corners = np.array(corners).astype(int) centroids", "row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,255,0),2) cv2.imwrite('subpixel5.png',img)", "corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner in", "= cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255]", "cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]]", "in corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row +", "corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner in centroids: row =", "for corner in centroids: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col +", "corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) res = np.int0(res)", "= [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners:", "= gray_input img = img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w]", "cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner in centroids: row = corner[0]", "ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define the criteria to stop and", "#filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row,", "= cv2.connectedComponentsWithStats(dst) # define the criteria to stop and refine the corners criteria", "img_input[col:col+70, row:row+70] # find Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04", "(cv2.TERM_CRITERIA_EPS + 
cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res", "criteria to stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,", "in centroids: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row +", "dst = np.uint8(dst) # find centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) #", "value dst = np.uint8(dst) # find centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)", "the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) #", "print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners: row =", "as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input,", "row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris corners gray = np.float32(gray) dst", "= np.array(centroids).astype(int) for corner in corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col,", "img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in", "= cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original", "cv2.connectedComponentsWithStats(dst) # define the criteria to stop and refine the corners criteria =", "np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max()", "Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret,", "find centroids ret, labels, stats, centroids = 
cv2.connectedComponentsWithStats(dst) # define the criteria to", "np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners: row = corner[0] col =", "dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst =", "100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) res", "labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define the criteria to stop and refine", "centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define the criteria to stop", "corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape #print(height, width) gray = gray_input", "centroids: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,255,0),2)", "np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input,", "= img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find", "= np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner in corners: row = corner[0] col", "2),(0,0,255),2) for corner in centroids: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col", "= np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0)", "height, width = gray_input.shape #print(height, width) gray = gray_input img = img_input #crop_img", "cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst) #", "original value dst = np.uint8(dst) # find centroids ret, labels, stats, centroids =", "= img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70,", "img = 
img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img =", "img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids = np.array(centroids).astype(int) for corner", "= gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris corners gray =", "# find Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst =", "# define the criteria to stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS", "was original value dst = np.uint8(dst) # find centroids ret, labels, stats, centroids", "cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst) # find centroids ret, labels,", "cv2 import numpy as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray =", "find Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None)", "res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int) centroids =", "the criteria to stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,", "= cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width =", "img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris", "#crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] #", "centroids = np.array(centroids).astype(int) for corner in corners: row = corner[0] col = corner[1]", "gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst =", "stop and refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners", "corner in centroids: row = 
corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row", "= 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col):", "width) gray = gray_input img = img_input #crop_img = img[y:y+h, x:x+w] #gray =", "and refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners =", "#0.01*dst.max() was original value dst = np.uint8(dst) # find centroids ret, labels, stats,", "= cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst)", "0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) res =", "Now draw them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0]", "row:row+70] # find Harris corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst", "def corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape #print(height, width) gray =", "= corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner in centroids: row", "refine the corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)", "#img = img_input[col:col+70, row:row+70] # find Harris corners gray = np.float32(gray) dst =", "import numpy as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)", "corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2)", "import cv2 import numpy as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray", "corners criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = 
cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now", "row),(col + 2,row + 2),(0,0,255),2) for corner in centroids: row = corner[0] col", "corner in corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row", "#0.04 dst = cv2.dilate(dst,None) ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst", "'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height,", "row, col): height, width = gray_input.shape #print(height, width) gray = gray_input img =", "# Now draw them res = np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] =", "width = gray_input.shape #print(height, width) gray = gray_input img = img_input #crop_img =", "ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst) # find", "numpy as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def", "#gray = gray_input[col+col:h, row+row:w] #img = img_input[col:col+70, row:row+70] # find Harris corners gray", "= gray_input.shape #print(height, width) gray = gray_input img = img_input #crop_img = img[y:y+h,", "cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width = gray_input.shape #print(height, width) gray", "col): height, width = gray_input.shape #print(height, width) gray = gray_input img = img_input", "np.array(centroids).astype(int) for corner in corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col", "gray = gray_input img = img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h,", "for corner in corners: row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col +", "= corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner", 
"<reponame>SJungert/computer_gaze_tracking import cv2 import numpy as np #filename = 'chessboard2.jpg' #img = cv2.imread(filename)", "criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw", "gray_input.shape #print(height, width) gray = gray_input img = img_input #crop_img = img[y:y+h, x:x+w]", "gray_input img = img_input #crop_img = img[y:y+h, x:x+w] #gray = gray_input[col+col:h, row+row:w] #img", "col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for corner in centroids:", "= cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst) # find centroids ret,", "= np.uint8(dst) # find centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define", "#img = cv2.imread(filename) #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) def corner_detect(gray_input, img_input, row, col): height, width", "= np.hstack((centroids,corners)) res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] print(corners) corners = np.array(corners).astype(int)", "cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners))", "dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value dst = np.uint8(dst) # find centroids", "row = corner[0] col = corner[1] cv2.rectangle(img,(col, row),(col + 2,row + 2),(0,0,255),2) for", "np.uint8(dst) # find centroids ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) # define the", "corners gray = np.float32(gray) dst = cv2.cornerHarris(gray,2,3,0.004) #0.04 dst = cv2.dilate(dst,None) ret, dst" ]
[ "This exercise will help you deal with other file formats and how toa", "import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team =", "Not every data format will be in csv there are other file formats", "numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team", "# An exercise to know who is the most aggresive player or maybe", "one particular team # Filter record where batsman scored six and player with", "other file formats also. # This exercise will help you deal with other", "of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum of all", "this exercise will help you get the statistics on one particular team #", "delimiter=',',skip_header=1) # Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\",", "getting to know that which are all those six teams that played in", "# Filter record where batsman scored six and player with most number of", "# This exercise will help you deal with other file formats and how", "Sum of all extras # An exercise to make you familiar with indexing", "unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played", "exercise to know who is the most aggresive player or maybe the scoring", "# this exercise will help you get the statistics on one particular team", "Filter record where batsman scored six and player with most number of sixex", "unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum of all extras in", "print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries", "further statistics keeping that in mind. 
# Number of unique teams # this", "# Sum of all extras # An exercise to make you familiar with", "so that we can analyze further statistics keeping that in mind. # Number", "given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman", "is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored", "# How many matches were held in total we need to know so", "of unique teams # this exercise deals with you getting to know that", "you getting to know that which are all those six teams that played", "played in the tournament. # Sum of all extras # An exercise to", "Number of times Mumbai Indians won the toss # this exercise will help", "print(\"Get all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won", "the tournament. # Sum of all extras # An exercise to make you", "player got out # Get the array of all delivery numbers when a", "of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of", "csv there are other file formats also. # This exercise will help you", "statistics on one particular team # Filter record where batsman scored six and", "wicket type. # Number of times Mumbai Indians won the toss # this", "sixex # An exercise to know who is the most aggresive player or", "be in csv there are other file formats also. # This exercise will", "player with most number of sixex # An exercise to know who is", "extras # An exercise to make you familiar with indexing and slicing up", "need to know so that we can analyze further statistics keeping that in", "in mind. 
# Number of unique teams # this exercise deals with you", "won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches", "toa read it. from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number", "to make you familiar with indexing and slicing up within data. # Delivery", "are all those six teams that played in the tournament. # Sum of", "all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which given player is out,tell", "this exercise deals with you getting to know that which are all those", "with indexing and slicing up within data. # Delivery number when a given", "most number of sixex # An exercise to know who is the most", "with you getting to know that which are all those six teams that", "can analyze further statistics keeping that in mind. # Number of unique teams", "array of all delivery numbers when a given player got out. Also mention", "in total we need to know so that we can analyze further statistics", "out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6", "np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1])", "mind. # Number of unique teams # this exercise deals with you getting", "file formats and how toa read it. from numpy import genfromtxt my_data =", "will help you get the statistics on one particular team # Filter record", "that which are all those six teams that played in the tournament. 
#", "0)) print(\"Get all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss", "the array of all delivery numbers when a given player got out. Also", "Mumbai Indians won the toss # this exercise will help you get the", "analyze further statistics keeping that in mind. # Number of unique teams #", "make you familiar with indexing and slicing up within data. # Delivery number", "all those six teams that played in the tournament. # Sum of all", "on one particular team # Filter record where batsman scored six and player", "indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in", "tournament. # Sum of all extras # An exercise to make you familiar", "# Not every data format will be in csv there are other file", "out. Also mention the wicket type. # Number of times Mumbai Indians won", "got out. Also mention the wicket type. # Number of times Mumbai Indians", "number of sixex # An exercise to know who is the most aggresive", "all extras # An exercise to make you familiar with indexing and slicing", "help you deal with other file formats and how toa read it. from", "with most number of sixex # An exercise to know who is the", "of all delivery numbers when a given player got out. Also mention the", "and player with most number of sixex # An exercise to know who", "how toa read it. from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) #", "teams # this exercise deals with you getting to know that which are", "data. # Delivery number when a given player got out # Get the", "in csv there are other file formats also. # This exercise will help", "in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which given player is", "that we can analyze further statistics keeping that in mind. # Number of", "within data. 
# Delivery number when a given player got out # Get", "of sixex # An exercise to know who is the most aggresive player", "Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set", "unique_team which played match=\", unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis", "total we need to know so that we can analyze further statistics keeping", "know that which are all those six teams that played in the tournament.", "help you get the statistics on one particular team # Filter record where", "numpy as np # Not every data format will be in csv there", "extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which given player", "print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum of all extras in all", "toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) #", "which are all those six teams that played in the tournament. # Sum", "Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held", "six teams that played in the tournament. # Sum of all extras #", "player got out. Also mention the wicket type. # Number of times Mumbai", "we need to know so that we can analyze further statistics keeping that", "those six teams that played in the tournament. # Sum of all extras", "number when a given player got out # Get the array of all", "type. # Number of times Mumbai Indians won the toss # this exercise", "of times Mumbai Indians won the toss # this exercise will help you", "get the statistics on one particular team # Filter record where batsman scored", "mention the wicket type. 
# Number of times Mumbai Indians won the toss", "every data format will be in csv there are other file formats also.", "and how toa read it. from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1)", "scored six and player with most number of sixex # An exercise to", "exercise deals with you getting to know that which are all those six", "teams that played in the tournament. # Sum of all extras # An", "# Number of times Mumbai Indians won the toss # this exercise will", "deal with other file formats and how toa read it. from numpy import", "= np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\",", "to know that which are all those six teams that played in the", "by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were", "played match=\", unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0))", "familiar with indexing and slicing up within data. # Delivery number when a", "matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which", "How many matches were held in total we need to know so that", "were held in total we need to know so that we can analyze", "numbers when a given player got out. Also mention the wicket type. #", "six and player with most number of sixex # An exercise to know", "unique teams # this exercise deals with you getting to know that which", "match=\", unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get", "formats also. 
# This exercise will help you deal with other file formats", "= genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no", "when a given player got out # Get the array of all delivery", "given player got out # Get the array of all delivery numbers when", "team # Filter record where batsman scored six and player with most number", "where batsman scored six and player with most number of sixex # An", "a given player got out. Also mention the wicket type. # Number of", "# -------------- import numpy as np # Not every data format will be", "in the tournament. # Sum of all extras # An exercise to make", "print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in total", "Delivery number when a given player got out # Get the array of", "# Delivery number when a given player got out # Get the array", "who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in total we", "deals with you getting to know that which are all those six teams", "will help you deal with other file formats and how toa read it.", "Also mention the wicket type. # Number of times Mumbai Indians won the", "that in mind. # Number of unique teams # this exercise deals with", "formats and how toa read it. from numpy import genfromtxt my_data = genfromtxt(path,", "record where batsman scored six and player with most number of sixex #", "are other file formats also. # This exercise will help you deal with", "An exercise to know who is the most aggresive player or maybe the", "and slicing up within data. # Delivery number when a given player got", "it. 
from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique", "deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai", "of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which", "all delivery numbers when a given player got out. Also mention the wicket", "unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all", "all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which given", "unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team", "6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in total we need to", "will be in csv there are other file formats also. # This exercise", "the wicket type. 
# Number of times Mumbai Indians won the toss #", "batsman scored six and player with most number of sixex # An exercise", "import numpy as np # Not every data format will be in csv", "the toss # this exercise will help you get the statistics on one", "-------------- import numpy as np # Not every data format will be in", "Indians won the toss # this exercise will help you get the statistics", "player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who", "got out # Get the array of all delivery numbers when a given", "delivery=\",np.sum(my_data[:,17].astype(int), axis = 0)) print(\"Get all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11])", "# this exercise deals with you getting to know that which are all", "# Number of unique teams # this exercise deals with you getting to", "of all extras # An exercise to make you familiar with indexing and", "file formats also. # This exercise will help you deal with other file", "no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum of", "np # Not every data format will be in csv there are other", "Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How", "you get the statistics on one particular team # Filter record where batsman", "with other file formats and how toa read it. from numpy import genfromtxt", "axis = 0)) print(\"Get all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai", "there are other file formats also. # This exercise will help you deal", "that played in the tournament. 
# Sum of all extras # An exercise", "scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in total we need", "Number of unique teams # this exercise deals with you getting to know", "format will be in csv there are other file formats also. # This", "my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue", "slicing up within data. # Delivery number when a given player got out", "runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many matches were held in total we need to know", "exercise will help you get the statistics on one particular team # Filter", "genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of", "which played match=\", unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int), axis =", "we can analyze further statistics keeping that in mind. # Number of unique", "also. # This exercise will help you deal with other file formats and", "Get the array of all delivery numbers when a given player got out.", "print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0]) # How many", "exercise to make you familiar with indexing and slicing up within data. #", "many matches were held in total we need to know so that we", "to know who is the most aggresive player or maybe the scoring player", "An exercise to make you familiar with indexing and slicing up within data.", "delivery numbers when a given player got out. 
Also mention the wicket type.", "= 0)) print(\"Get all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians'])", "all deliveries which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by", "know so that we can analyze further statistics keeping that in mind. #", "a given player got out # Get the array of all delivery numbers", "given player got out. Also mention the wicket type. # Number of times", "of unique_team which played match=\", unique_team[:-1]) print(\"Sum of all extras in all delivery=\",np.sum(my_data[:,17].astype(int),", "data format will be in csv there are other file formats also. #", "out # Get the array of all delivery numbers when a given player", "# Get the array of all delivery numbers when a given player got", "# An exercise to make you familiar with indexing and slicing up within", "up within data. # Delivery number when a given player got out #", "matches were held in total we need to know so that we can", "keeping that in mind. # Number of unique teams # this exercise deals", "held in total we need to know so that we can analyze further", "genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches unique_team = np.unique(my_data[:,0],axis=0)", "other file formats and how toa read it. from numpy import genfromtxt my_data", "<filename>code.py # -------------- import numpy as np # Not every data format will", "times Mumbai Indians won the toss # this exercise will help you get", "to know so that we can analyze further statistics keeping that in mind.", "read it. from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of", "you familiar with indexing and slicing up within data. 
# Delivery number when", "print(\"Uniue no of mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum", "the statistics on one particular team # Filter record where batsman scored six", "won the toss # this exercise will help you get the statistics on", "particular team # Filter record where batsman scored six and player with most", "# Number of unique matches unique_team = np.unique(my_data[:,0],axis=0) print(\"Uniue no of mataches=\", unique_team.shape[0])", "exercise will help you deal with other file formats and how toa read", "indexing and slicing up within data. # Delivery number when a given player", "wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum) print(\"Batsman who scored 6 runs\",my_data[my_data[:,16].astype(int)>=6].shape[0])", "as np # Not every data format will be in csv there are", "you deal with other file formats and how toa read it. from numpy", "when a given player got out. Also mention the wicket type. # Number", "which given player is out,tell wickettype=\",my_data[my_data[:,22]!=np.nan][:,11]) toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians']) print(\"Toss won by Mumbai indians=\",toss_won_by_mum)", "from numpy import genfromtxt my_data = genfromtxt(path, delimiter=',',skip_header=1) # Number of unique matches", "statistics keeping that in mind. # Number of unique teams # this exercise", "toss # this exercise will help you get the statistics on one particular", "mataches=\", unique_team.shape[0]) print(\"Set of unique_team which played match=\", unique_team[:-1]) print(\"Sum of all extras" ]
[ "* 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations", "not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self):", "does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure", "1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0':", "on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>`", "the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version ==", "api_version == '1.5.0': from .dataset_dataplane import models return models elif api_version == '1.0.0':", "OperationClass else: raise ValueError(\"API 
version {} does not have operation group 'data_version'\".format(api_version)) return", "Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>`", "{} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview:", "* 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview:", "datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>`", "import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as", "if api_version == 
'2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version ==", "version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version =", "the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version ==", "2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane import models return models", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version ==", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif", "models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0:", "have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance", "from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", 
"_PROFILE_TAG + \" latest\" ) def __init__( self, credential, # type: \"TokenCredential\" subscription_id,", "raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config,", ".v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "else: raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client,", "RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API", "* 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations", "version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\"", "2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: 
:class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if", "group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on", ".v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have", "ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass", "@property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\"", "described in the profile. 
:param credential: Credential needed for the client to connect", "depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version", "group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on", "raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config,", "2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version ==", "import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", ":class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations", "OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API 
version {} does not have operation group", "if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import", "2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if", ":class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations", "does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version", "api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass", "* 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations", ":class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` 
\"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations", "= self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else:", "import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self):", "* 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations", "profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int", "* 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations", "2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import", "License.txt in the project root for # license information. 
# # Code generated", "CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from", "not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self):", "{} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\"", "does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations", "import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif api_version == 
'2021-10-01':", "elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from", "\"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as", "group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not", "v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = 
self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import", "CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass", "have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance", "import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API", ".model_dataplane.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "import models return models elif api_version == 'v1.0': from .runhistory import models return", "* 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations", "BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass", "def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview:", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01':", "from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from 
.v2022_01_01_preview.operations import", "\"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references')", "= AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile", "from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version", "ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from", ".v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations", "import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from", "\"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` *", "\" latest\" ) def __init__( self, credential, # type: \"TokenCredential\" subscription_id, # type:", "'2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if", "api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview':", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "models return models elif api_version == 'v1.0': from .runhistory import models return models", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: *", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: *", "depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if", "have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
model_containers(self): \"\"\"Instance", ".v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance", "API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version:", "version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version =", "async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version =", "\"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as", ":type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id:", "{} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` *", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API", "ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "\"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import SpansOperations as", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else: raise", "'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the", "else: raise ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return 
OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return", "self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise", "does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self):", "import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING:", ".v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations", "* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version:", "raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "if api_version == 'v1.0': from .runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version == '2022-05-01':", "from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not", "2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01':", "'2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return", "depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01:", 
"Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>`", "does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01':", "import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from", "else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client,", "elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else:", "api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass", "\"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as", "removed in final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin,", "'1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0',", "def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview:", "WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass", "\"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as", "2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import", "* 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane", "* 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version ==", "api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version", "from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not", "def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview:", "elif api_version == 
'2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version ==", "Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>`", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API", "2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version", "depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version", "\"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` *", "does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return", "2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return", "on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version", ".v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "_SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support current", "== '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = 
self._get_api_version('models') if", "@property def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` *", "2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise", "does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version ==", "operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>`", "if api_version == 'v1.0': from .runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API", "a fake class to support current implemetation of MultiApiClientMixin.\" Will be removed in", "the client to connect to Azure. 
:type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID", "group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on", "version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "use if no profile is provided, or if missing in profile. :type api_version:", "self, credential, # type: \"TokenCredential\" subscription_id, # type: str api_version=None, # type: Optional[str]", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version", "EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass", "v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import", "raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config,", ".v2021_10_01.operations import ModelVersionsOperations as OperationClass 
elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations", "self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version", "else: raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client,", "'2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API", "# Changes may cause incorrect behavior and will be lost if the code", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return", "from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version = 
self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass", "ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass", ".v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API version {} does not have", "dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version =", "on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version", "api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview':", "Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>`", "License. See License.txt in the project root for # license information. 
# #", "from .runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not", "on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>`", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return", ":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version ==", "from .v2022_05_01.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not", ".v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version", "\"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations import RunsOperations as", "PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): 
\"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>`", "{} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version))", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version =", ":mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>`", "operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance 
depends", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version))", "depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if", "{} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "The profile sets a mapping between an operation group and its API version.", "import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version =", ".runhistory.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "\"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as", "k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends", "API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0':", "== 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {}", ".registry_discovery.operations import AsyncOperationsOperations as OperationClass 
else: raise ValueError(\"API version {} does not have", "CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass", "from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", ":class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations", "EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0':", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version ==", "== 'v1.0': from .runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {}", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version: *", "two polls for LRO operations if no Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION", "on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version", "* 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\"", "group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return", "subscription_id, # type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default,", "not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self):", "api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from", "client to connect to Azure. 
:type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of", "v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import", "super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for", "2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01':", "not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self):", "{} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version ==", "have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance", "By default, it uses the latest API version available on public Azure. 
For", "'v1.0': from .runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API version {} does", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version: *", "version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version =", "'2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version))", "from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview:", "if api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version ==", 
"type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: *", "def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return", "{} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else:", "models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return models elif api_version", "'2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does", "2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if", "ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", 
"'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the", "depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01:", "from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0':", "* 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations", "depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: *", "from azure.mgmt.core import ARMPipelineClient from azure.profiles import 
KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin", "from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not", "\"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations')", "\"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as", "\"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as", "api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version", "ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API", ":class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version))", "api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version", "2022-05-01: 
:class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import", "ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview:", "import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as", "from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "'2022-01-01-preview': from .v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: * v1.0:", "does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance 
depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>`", ".model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have", ":class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version))", "Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>`", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0:", "api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview':", "the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\"", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API", "{} does not have operation group 'runs'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "* 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs')", "= self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif", "ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version: *", ".dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have", "Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>`", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version", "# type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config,", "API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version))", "may cause incorrect behavior and will be lost if the code is #", "api_version = self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as OperationClass", "'2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self):", "version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version =", "{} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = 
self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from", "'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01',", "1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version ==", "version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "@property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` *", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version))", "== '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {}", "raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config,", "version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>`", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations", "from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version:", "default, it uses the latest API version available on public Azure. 
For production,", "'2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if", "from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition", "elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API", ":class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations", "DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version ==", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API", "ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import", "self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version", ":class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import 
EventsOperations", "group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on", "not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0:", "Default waiting time between two polls for LRO operations if no Retry-After header", "def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version", "A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval:", "'2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "== '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {}", "not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self):", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: *", "@property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` *", "* 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: 
:class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version ==", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from", "= self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return", "elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API", "api_version == 'v1.0': from .runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API version", "@property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\"", "api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass", "API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0':", "== '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from", "Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>`", "workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>`", "group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on", "{} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing", "have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance", "else: raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client,", "v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import", "if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API", ":class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations", "\"\"\"Instance depends on the API version: 
* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts')", "api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "@property def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` *", "depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if", "* 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations", "\"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as", "operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends", "'2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does", "OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version:", "under the MIT License. See License.txt in the project root for # license", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else:", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01':", "version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", ".v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01:", ".v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations", "operation group is not described in the profile. 
:param credential: Credential needed for", "2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01':", "{ None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0',", "api_version == 'v1.0': from .runhistory import models return models elif api_version == '2020-09-01-dataplanepreview':", "Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>`", "Learning Workspace resources. 
This ready contains multiple API versions, to help you deal", "{} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` *", "import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as", "elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API", "on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version", "version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import TYPE_CHECKING", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version))", "RunOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return", "raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config,", "else: raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client,", "temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version =", "Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>`", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version:", "2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 
2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0':", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API", "from .dataset_dataplane import models return models elif api_version == '1.0.0': from .model_dataplane import", "DataContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass", "== '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from", "== '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "* 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version ==", "is provided, or if missing in profile. 
:type api_version: str :param base_url: Service", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version", "* 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API", "else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client,", "'2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "'1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01',", "not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self):", "* 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\"", "models return 
models elif api_version == '2022-05-01': from .v2022_05_01 import models return models", ".v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations", "# type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, #", "'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2':", "'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the", "\"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as", "= self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version ==", ".v2021_10_01_dataplanepreview.operations import CodeContainersOperations as 
OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations", "1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import", "models return models elif api_version == '2021-10-01': from .v2021_10_01 import models return models", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise", "2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import", "'1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version", "version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on", "ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "depends on the API version: * 1.5.0: 
:mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0:", "'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the", "# license information. # # Code generated by Microsoft (R) AutoRest Code Generator.", ":mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version", "elif api_version == '2021-10-01': from .v2021_10_01 import models return models elif api_version ==", "api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass", "ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version:", "BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass", "depends on the API 
version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if", "have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0:", "self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k,", "subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from", "self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls,", "on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>`", "\"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` *", "on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version", "API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0':", "raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", ".registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version:", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01':", "from typing import Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self,", "= self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif", "== 'v1.0': from .runhistory.operations import 
RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return", "the profile. :param credential: Credential needed for the client to connect to Azure.", "str :param api_version: API version to use if no profile is provided, or", "{} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self):", "does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends", "model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>`", "be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import", "= self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif", "the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions')", "* 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections')", "'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0',", "depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01:", "help you deal with all of the Azure clouds (Azure Stack, Azure Government,", "raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on", "from .v2022_02_01_preview import models return models elif api_version == '2022-05-01': from .v2022_05_01 import", "{} does not have 
operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API", "ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version", "operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends", "{} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version ==", "if no profile is provided, or if missing in profile. 
:type api_version: str", "QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return", ".v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations", "not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self):", ":keyword int polling_interval: Default waiting time between two polls for LRO operations if", "'2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", ".dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have", "DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass", ".v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass", "'2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", 
":type subscription_id: str :param api_version: API version to use if no profile is", "SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "{} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "import models return models elif api_version == '2022-05-01': from .v2022_05_01 import models return", "operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif", ":mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>`", "import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {}", "version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)))", "\"TokenCredential\" subscription_id, # type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str", "ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass", ".runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "between two polls for LRO operations if no Retry-After header is present. \"\"\"", ":type base_url: str :param profile: A profile definition, from KnownProfiles to dict. :type", "on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>`", "* 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version ==", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model')", ".v2022_05_01 import models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API 
version:", "PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass", ".runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models", "2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if", "behavior and will be lost if the code is # regenerated. # --------------------------------------------------------------------------", "DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "uses the latest API version available on public Azure. 
For production, you should", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0:", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call')", "== '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API", "version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>`", "API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version", ".v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from 
.v2022_02_01_preview.operations import ModelContainersOperations", "= self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif", "'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the", "'2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does", "self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif api_version", "{} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass", "Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>`", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview:", "does not have operation group 
'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "{} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations", "ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "multiple API versions, to help you deal with all of the Azure clouds", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return", "\"\"\"These APIs allow end users to operate on Azure Machine Learning Workspace resources.", ":class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations", "2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01':", "api_version = 
self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass", "def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview:", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise", "version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from", "ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from", "api-version parameter sets the default API version if the operation group is not", ".v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations", "== 'v1.0': from .registry_discovery import models return models elif api_version == 'v1.0': from", "ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ".v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "* 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` 
\"\"\" api_version = self._get_api_version('environment_containers') if api_version ==", "if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version ==", "else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client,", "'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0',", ".dataset_dataplane import models return models elif api_version == '1.0.0': from .model_dataplane import models", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version", "does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'1.5.0': from .dataset_dataplane import models return models elif api_version == '1.0.0': from .model_dataplane", "2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if", "have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance", "group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on", "raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: *", "2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if", ":class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import MetricOperations", "else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client,", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "**kwargs # type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url,", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01':", "\"\"\"Instance depends on the API version: * 2022-02-01-preview: 
:class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\"", "= self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif", "version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return", "= self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else:", "api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version", "API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version", "import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "{} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) 
return", "'2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "* 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations", ".v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations", "{} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def", "from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version:", "from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return", "operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends", ".v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "* v1.0: 
:class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations", "Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return self def __exit__(self, *exc_details): self._client.__exit__(*exc_details)", "import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "= self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import Operations as OperationClass elif", "raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass", ":class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import RunOperations", "2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if", "mapping between an operation group and its API version. 
The api-version parameter sets", "import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as", "import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0',", "operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends", "Changes may cause incorrect behavior and will be lost if the code is", "version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as", "GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version", "import MigrationOperations as OperationClass else: raise ValueError(\"API 
version {} does not have operation", "2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version", "api_version == 'v1.0': from .runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API version", "from .v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise", "version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` *", "models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self):", "{} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", 
"2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01':", "'2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>`", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version =", "\"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as", "'v1.0': from .runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` *", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: *", 
"\"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` *", "depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return", "@property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` *", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return", "import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance", "\"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as", "version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = 
self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from", "2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if", ".v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations", "from .v2021_10_01.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "\"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as", ".v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have", "does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config,", "on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>`", "api_version = self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass", "api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version", "Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>`", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` *", "version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API", "import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as", "'2021-10-01': from .v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from", 
"else: raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version:", "'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the", "'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01':", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version: * v1.0:", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise", "2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01':", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise", "== '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {}", "depends 
on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: *", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from", "if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version ==", "@property def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\"", "'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does", "def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version", "Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>`", "'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) 
@property def spans(self): \"\"\"Instance depends on the", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else:", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01':", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version))", "does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config,", "== '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {}", "not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self):", "Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>`", ".v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations", "v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: 
:mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview:", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations", "on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version", "from .runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version))", "api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version", ".v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations", "import JobsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as", "== '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {}", "type: KnownProfiles **kwargs # type: Any ): self._config = 
AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client", "group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on", "assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version =", "models elif api_version == 'v1.0': from .runhistory import models return models elif api_version", "api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version", "2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01':", "self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` *", "2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: 
:class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01':", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return", "'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1':", "DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return models elif", "import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as", "Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE =", "import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as", "have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance", "API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0':", "'1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0',", 
":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from", "rights reserved. # Licensed under the MIT License. See License.txt in the project", "elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API", "= self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets')", "self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise", "credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id: str", "import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", ":class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version:", "on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version", "have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance", "operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends", "'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the", "\"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as", "api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif 
api_version == '2022-01-01-preview':", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version))", "* 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane import models return", "'v1.0': from .registry_discovery import models return models elif api_version == 'v1.0': from .runhistory", "from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", ":mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>`", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version:", "no profile is provided, or if missing in profile. 
:type api_version: str :param", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "'2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif", "raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config,", "@property def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` *", "group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on", "'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG", "from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations", "'2022-05-01': from .v2022_05_01.operations import DataContainersOperations as 
OperationClass else: raise ValueError(\"API version {} does", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else:", "'2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does", "operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends", "'2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0',", "ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview',", "version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version =", "\"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as", "import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "* 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\"", "self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return self def __exit__(self,", "* v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations", "self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version", "* 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations", "OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from 
.v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass", "component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>`", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01':", "depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview:", "\"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric')", "Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>`", "* 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version ==", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from", "version: * 1.5.0: 
:class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from", "self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version", "if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version ==", "def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version", "ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the", "* 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` *", "import Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "credential: Credential needed for the client to connect to Azure. 
:type credential: ~azure.core.credentials.TokenCredential", "from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01':", "from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "group and its API version. 
The api-version parameter sets the default API version", "from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version", "have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance", "{} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", ".v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API", "profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for LRO", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version))", "final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These", "azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from", "'2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "not have operation group 
'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self):", "ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass", "def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview:", "jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>`", "def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview:", "elif api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else: raise ValueError(\"API", "subscription. :type subscription_id: str :param api_version: API version to use if no profile", "== '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version:", "information. 
# # Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>`", "models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return models elif api_version", "events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version =", "multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from", "Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>`", "the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version ==", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: 
:class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from", "api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version", "else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client,", "version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version", "* 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version ==", "import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "as OperationClass elif api_version == 
'2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif", "'2021-10-01': from .v2021_10_01.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "is a fake class to support current implemetation of MultiApiClientMixin.\" Will be removed", "api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version", "depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01:", "api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs", "does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version", "\"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as", "depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if", "**kwargs) 
super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v", "group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else:", "depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if", "private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>`", "if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif api_version ==", "self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise", "BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: 
:mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` *", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations", "'2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "= self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif", "depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if", ".v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations", "operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends", "from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import", "PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations 
import PrivateLinkResourcesOperations as OperationClass", "profile sets a mapping between an operation group and its API version. The", ".v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: *", ".v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations", "the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). By default, it", ".dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have", "self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import MetricOperations as OperationClass else: raise", "@property def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\"", "on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version", "else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client,", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status')", "raise ValueError(\"API 
version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config,", "'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events':", "api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version", "else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client,", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version))", "api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: *", ":class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations", "OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return models elif api_version ==", "api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "def delete(self): \"\"\"Instance 
depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0:", "else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client,", "group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on", "does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version", "Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>`", ".v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations", "from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "OperationClass else: raise ValueError(\"API 
version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API", "* 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version ==", "2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass else: raise", "EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01:", "'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): 
\"\"\"Instance depends on the", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else: raise", "'2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0',", "versions, to help you deal with all of the Azure clouds (Azure Stack,", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: *", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version ==", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise", "version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", ".runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "Azure China, etc.). By default, it uses the latest API version available on", "end users to operate on Azure Machine Learning Workspace resources. 
This ready contains", "elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API", "depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if", "== '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "import models return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return", "group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on", "import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>`", ":mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane import models return models elif", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "missing in profile. 
:type api_version: str :param base_url: Service URL :type base_url: str", "ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version =", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` *", "from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not", "'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the", "return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return models elif", "the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\"", "'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__(", "= 
self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else:", "version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version =", ".v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations", "elif api_version == '1.0.0': from .model_dataplane import models return models elif api_version ==", "api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version", "depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01:", "version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs", ".v2022_02_01_preview import models return models elif api_version == '2022-05-01': from .v2022_05_01 import models", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return", "api_version = self._get_api_version('quotas') 
if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass", "if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version ==", "the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\"", "@property def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` *", "\"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as", "* 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections')", "from .runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import", "'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the", "Deserializer(self._models_dict(api_version))) @property def 
workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>`", "Service URL :type base_url: str :param profile: A profile definition, from KnownProfiles to", "api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview':", "== '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from", ".v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import", "from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version == '2022-05-01': from", "\"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as", "2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01':", "version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)))", "'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the", "version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>`", "raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config,", "class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine", "particular api-version and/or profile. 
The profile sets a mapping between an operation group", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version == '2022-05-01':", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version:", "AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile )", "Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>`", "not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self):", "operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends", "ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the", "def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview:", "api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version", "API versions, to help you deal with all of the Azure clouds (Azure", 
".v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version))", "latest\" ) def __init__( self, credential, # type: \"TokenCredential\" subscription_id, # type: str", "api_version = self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass", "Code Generator. # Changes may cause incorrect behavior and will be lost if", "API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0':", "from .v2021_10_01.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2')", "'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the", "\"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as", "from .v2021_10_01_dataplanepreview.operations import 
ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self):", "import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version))", "= self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif", "the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version ==", "{} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "* 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations", "operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends", "Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>`", "* 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources')", "__init__(self, *args, **kwargs): \"\"\"This is a fake class to support current implemetation of", "api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass", "{} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "else: raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client,", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version", "operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends", "type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any ): self._config =", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): 
\"\"\"Instance depends on the API version: *", "not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self):", "'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions':", "if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version ==", "raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config,", "* 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version ==", "@property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` *", "self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version", "models return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return models", "@property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` *", "group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
experiments(self): \"\"\"Instance depends on", ".v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version ==", "the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version ==", "api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version", "{} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "\"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events')", "== 'v1.0': from .runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else:", "China, etc.). By default, it uses the latest API version available on public", "# # Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may", "elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API", "def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version", "@property def events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\"", "elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API", "Azure Government, Azure China, etc.). By default, it uses the latest API version", "api_version = self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass", "on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version", "api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as 
OperationClass else: raise ValueError(\"API version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version:", "API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01:", "* 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations", "does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: *", "else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client,", "on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>`", 
"OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version", "have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance", "from .model_dataplane import models return models elif api_version == 'v1.0': from .registry_discovery import", "from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0':", "not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self):", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01':", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version", "from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations", "* 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version ==", ":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations", "version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version =", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 
2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from", "elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API", "have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01:", "* 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: *", "from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", 
".v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations", "not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self):", "api_version == 'v1.0': from .runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API version", "2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if", "the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version ==", "'2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "ID of the target subscription. 
:type subscription_id: str :param api_version: API version to", "models elif api_version == '2022-05-01': from .v2022_05_01 import models return models raise ValueError(\"API", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01':", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models elif api_version ==", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version))", "depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01:", "the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version ==", "group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on", "= self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import RunOperations as OperationClass else:", "ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "2021-10-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version", "'2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the", "to support current implemetation of MultiApiClientMixin.\" Will be removed in final version of", "1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import", "models elif api_version == '1.0.0': from .model_dataplane import models return models elif api_version", "api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass", "EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass", ":type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for", "self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version: *", "component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>`", "if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API", "import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {}", "* 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations", ":class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations", "(R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be", "v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import", "API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version", "else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client,", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from", "2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01':", "import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as", "QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API", "Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else:", "@property def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\"", "import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as", "group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on", "from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version: *", ".v2022_01_01_preview.operations import Operations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations", "code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>`", "* 2021-10-01-dataplanepreview: 
:class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions')", "'2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", ".v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models", "== '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version", "'2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: *", ".v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations", "'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): 
\"\"\"Instance depends on the", "== '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "polls for LRO operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION =", "1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import", "usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>`", "raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config,", "private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>`", "2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import", "def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version:", "== '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "LRO operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG", "else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client,", "== '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config,", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif", "import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as", "not described in the profile. 
:param credential: Credential needed for the client to", "import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as", "operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends", "API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01:", "2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if", "from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "{} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "== '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as 
OperationClass elif api_version == '2022-01-01-preview': from", "type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type:", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version", ".v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version))", "Stack, Azure Government, Azure China, etc.). 
By default, it uses the latest API", "migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version =", "'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does", "_models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}", "DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass", "the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete')", "@property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\"", "from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview':", "api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview':", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version =", "import models return models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01':", "the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version ==", "version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version", "api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview':", "\"\"\" api_version = self._get_api_version('code_containers') if 
api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as", "return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def", "version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from", "*args, **kwargs): \"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\"", "import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version: *", "The api-version parameter sets the default API version if the operation group is", "'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }},", "== '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {}", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import", 
"Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>`", "time between two polls for LRO operations if no Retry-After header is present.", "self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise", "self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version", "registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version =", "group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on", "import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest", "if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API", "operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends", "self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else: raise", "Microsoft Corporation. All rights reserved. 
# Licensed under the MIT License. See License.txt", "api_version == '2021-10-01': from .v2021_10_01.operations import Operations as OperationClass elif api_version == '2022-01-01-preview':", "api_version = self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass", "JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client,", "\"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01':", "not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self):", "elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API", "== '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {}", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>`", "models return models elif 
api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models", "* 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations", "'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the", "version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from", "1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import", "import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as", "== 'v1.0': from .runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {}", "UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from 
.v2021_10_01.operations import CodeVersionsOperations as OperationClass", "if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version:", "import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from", "the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\"", "'2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version", "api_version = self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass", "raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass", "pylint: disable=unused-import,ungrouped-imports 
from typing import Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object):", "'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` *", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations", ".v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations", "api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API", "from .model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not", "version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", ".v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass 
elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations", "users to operate on Azure Machine Learning Workspace resources. This ready contains multiple", "self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version", "= self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else:", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else:", "ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "Government, Azure China, etc.). By default, it uses the latest API version available", "Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. 
:type", "version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", ":class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations", "api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "'2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends", "\"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise", ":param base_url: Service URL :type base_url: str :param profile: A profile definition, from", "@property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` *", "DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "as 
OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "it uses the latest API version available on public Azure. For production, you", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from", "from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "'2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01:", "* 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations", ".v2021_10_01.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import 
UsagesOperations", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API", "2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version =", "# Licensed under the MIT License. 
See License.txt in the project root for", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version =", ":class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations", "workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>`", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API", 
"Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>`", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API", "from .v2022_05_01.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self):", "api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: *", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01':", "operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends", "elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance 
depends on the API version:", "version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version", "does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version", "* 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations", "the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core", "ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "with all of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.).", "api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif", "'2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does", "import models return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return", "operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends", "Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>`", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1')", "import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "operation group 'runs'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends", "'2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "{} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version", "available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>`", "on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` *", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version))", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version ==", "2021-10-01: 
:class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if", "== '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {}", "'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: *", "def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support current implemetation", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "api_version = self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import SpansOperations as OperationClass", "the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\"", "by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and", "from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version", "if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version ==", "ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass", "have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API", "from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "from .v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API version {} does not", "2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` 
\"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01':", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version:", "2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if", "API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview':", "@property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` *", "\"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as", "operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends", ":class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations", "as 
OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else:", ".dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not have", "an operation group and its API version. The api-version parameter sets the default", "batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>`", "== '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {}", "as OperationClass elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else:", ".model_dataplane import models return models elif api_version == 'v1.0': from .registry_discovery import models", "version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from", "does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "* 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations", "\"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations as", "'1.5.0', 'dataset_v2': '1.5.0', 
'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0',", "return models elif api_version == '2022-05-01': from .v2022_05_01 import models return models raise", "version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise", "not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close()", "is # regenerated. 
# -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient", "raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config,", "from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "\"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as", "the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version ==", "the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\"", "from .runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container')", "on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version", "2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" 
api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", ".v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations", "def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version ==", ".v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations", "depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` *", "from .v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from .v2021_10_01 import", "if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise 
ValueError(\"API", "the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\"", "\"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as", "and its API version. The api-version parameter sets the default API version if", "'2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "\"\"\" if api_version == '1.5.0': from .dataset_dataplane import models return models elif api_version", "'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the", "* 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations", "\"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as", "depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01:", "if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as 
OperationClass else: raise ValueError(\"API", "depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview:", "@property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\"", "version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version =", "raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config,", "does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version", "not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self):", "* 2022-02-01-preview: 
:class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version ==", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from", "operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends", "version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from", "* 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments')", "version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "return models elif api_version == '2021-10-01': from .v2021_10_01 import models return models elif", "depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: 
:class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01:", "(c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See", "* 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments')", "raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config,", "import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API", "on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>`", "def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview:", "* 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: 
:mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` *", "api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass", "version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version ==", "api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass", ".v2022_05_01.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version:", "operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends", "ready contains multiple API versions, to help you deal with all of the", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version == 
'2022-02-01-preview': from", "BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "{} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version: *", "For production, you should stick to a particular api-version and/or profile. The profile", "self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass else: raise", "if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version))", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return", ".v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration')", "for LRO operations if no Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01'", "}}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential, # type: \"TokenCredential\"", "version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version =", "def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version", "'2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01':", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers')", "# type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any ): self._config", "does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "between an operation group and its API version. 
The api-version parameter sets the", "group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on", "import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as", "Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>`", "# pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential class", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version", "version if the operation group is not described in the profile. 
:param credential:", "* 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version ==", ".model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not have", "* 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\"", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: *", "if api_version == 'v1.0': from .runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models elif", "'1.0.0': from .model_dataplane import models return models elif api_version == 'v1.0': from .registry_discovery", ".v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from 
.v2022_05_01.operations import OnlineDeploymentsOperations", "DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version ==", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version:", "if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version ==", "= '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets':", "in the project root for # license information. # # Code generated by", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise", "2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif", "{} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version =", "in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the", "have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance", "have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance", "not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self):", "ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: 
:mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01:", "MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: #", "CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass", "return models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API", "clouds (Azure Stack, Azure Government, Azure China, etc.). By default, it uses the", "def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v,", "== '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {}", "group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on", "provided, or if missing in profile. 
:type api_version: str :param base_url: Service URL", "not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self):", "'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass else:", "2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01':", "'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" )", "'2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does", "import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as", "Machine Learning Workspace resources. 
This ready contains multiple API versions, to help you", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API", "import ComputeOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as", ":class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version:", "\"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as", "have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance", "\"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run')", "ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): 
\"\"\"Instance depends on the API version: *", "int polling_interval: Default waiting time between two polls for LRO operations if no", "api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass", "you deal with all of the Azure clouds (Azure Stack, Azure Government, Azure", "if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version ==", "not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self):", "import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise", "on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>`", "= self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations import RunsOperations as OperationClass else:", "elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API", "* 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: 
:class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores')", "API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0':", "import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return", "from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "if api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version ==", "experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version =", "waiting time between two polls for LRO operations if no Retry-After header is", "the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers')", "version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", 
"if api_version == 'v1.0': from .runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API", "self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as OperationClass else: raise", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API", "from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else:", "RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API", "== 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {}", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif", "\"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as", "== '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {}", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = 
self._get_api_version('online_endpoints') if api_version == '2021-10-01': from", "= self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else:", "import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as", "== '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {}", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview:", "profile. :param credential: Credential needed for the client to connect to Azure. :type", "if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import TYPE_CHECKING from", "Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>`", "the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\"", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version", "Azure clouds (Azure Stack, Azure Government, Azure China, etc.). 
By default, it uses", "group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on", "= self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif", "group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version))", "api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass", "version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version =", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version))", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models')", "def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview:", ".v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations", "'2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0',", "'2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does", "dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version =", "group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on", "{} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "subscription_id: The ID of the target subscription. :type subscription_id: str :param api_version: API", "'2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "version available on public Azure. 
For production, you should stick to a particular", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01':", "have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance", "'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return", "resources. This ready contains multiple API versions, to help you deal with all", "if missing in profile. :type api_version: str :param base_url: Service URL :type base_url:", "'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run':", "API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if", "if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version ==", "2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import", "api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview':", "v1.0: 
:class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import", "'v1.0': from .runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does", "2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if", "== '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {}", "api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif", "api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass", "version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "'2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "as OperationClass else: raise 
ValueError(\"API version {} does not have operation group 'operations'\".format(api_version))", "version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: *", "version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>`", "version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>`", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` 
\"\"\" api_version = self._get_api_version('code_versions') if api_version", "have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance", "ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "* 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers')", "operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API", "operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the 
API version:", "Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>`", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01':", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version:", "api_version: str :param base_url: Service URL :type base_url: str :param profile: A profile", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version ==", "ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass", "isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: *", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from", "_PROFILE_TAG = 
\"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations':", "@property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\"", "2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01':", "group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on", "self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise", "from .v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else:", "Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>`", "does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property 
def", "\"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as", "@classmethod def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if", "2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01':", "# -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import", "self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else: raise", "ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\"", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version ==", "'2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "\"\"\"Instance depends on the API version: * 
2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` *", "version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>`", "= self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else:", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version))", "'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers':", "== '1.0.0': from .model_dataplane import models return models elif api_version == 'v1.0': from", "'2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import 
ComponentVersionsOperations as OperationClass elif api_version", "* 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations", "not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self):", "group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on", "KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration", "ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version", "project root for # license information. 
# # Code generated by Microsoft (R)", "'2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "@property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` *", "* 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version ==", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version:", "raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config,", "import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as", "Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>`", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "from .registry_discovery.operations import 
RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not", "from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not", "from .v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import", "have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance", "does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview:", "does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self):", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version:", "models raise ValueError(\"API version {} is not available\".format(api_version)) 
@property def assets(self): \"\"\"Instance depends", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass else: raise", "ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "import Operations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return self", "group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01:", ".runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API version {} does not have", ".v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations", "environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>`", "UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version", "import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as", "\"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace')", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif", "'1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version:", "API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == 
'1.5.0':", "* 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations", "operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends", "raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API", "does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version))", "self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "{} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif api_version ==", "def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview:", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2')", "api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview':", "depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01:", "group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on", "raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass", "import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" 
api_version =", "== '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {}", "import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as", "'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the", "~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. :type subscription_id: str :param", "you should stick to a particular api-version and/or profile. The profile sets a", "connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target", "raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config,", "BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass", "import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as", "= self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif", ".dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have", "'2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have 
operation", "Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>`", ".v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif", "* 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions')", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from", "raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client,", "self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version", "else: raise ValueError(\"API 
version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client,", "@property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\"", "models return models elif api_version == 'v1.0': from .registry_discovery import models return models", "to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription.", "else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client,", "does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model':", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations", "'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the", "= self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import MetricOperations as OperationClass else:", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from .v2022_01_01_preview", 
":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version", "2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version", "\"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import EventsOperations as", "KnownProfiles to dict. 
:type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version))", "else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client,", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version))", "* 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version ==", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else:", "production, you should stick to a particular api-version and/or profile. 
The profile sets", "API version to use if no profile is provided, or if missing in", "* 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations", "'2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version", "operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "else: raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client,", "have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance", "depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: 
:class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version", "* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations", "* 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations')", "else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client,", "version to use if no profile is provided, or if missing in profile.", "not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self):", "2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if", "operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance 
depends", "if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", ":param api_version: API version to use if no profile is provided, or if", "raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance", "API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version", "import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as", "from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not", "raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "* 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: 
:class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes')", "WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version", "1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version:", "== '2021-10-01': from .v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from", "== '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {}", "DatastoresOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass", "== '2021-10-01-dataplanepreview': from 
.v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import", ".v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return", "definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting", "not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self):", "def metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version", ":class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations", "api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "else: raise ValueError(\"API version {} does not have operation group 
'dataset_containers'\".format(api_version)) return OperationClass(self._client,", "'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the", "operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends", ".v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not", "@property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\"", "version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version:", "import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as", "sets a mapping between an operation group and its API version. 
The api-version", "import models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def", "v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import", "a mapping between an operation group and its API version. The api-version parameter", "group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on", "version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance", "Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>`", ".runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have", "* v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations", "MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "import ModelVersionsOperations as 
OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as", "APIs allow end users to operate on Azure Machine Learning Workspace resources. This", "if api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass else: raise ValueError(\"API", "'2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "* 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version ==", "__init__( self, credential, # type: \"TokenCredential\" subscription_id, # type: str api_version=None, # type:", "batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version =", ".v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations", "raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config,", "'2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as", "* 2021-10-01: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints')", "PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2021-10-01': from .v2021_10_01.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from", "* v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from", "2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: 
:class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if", "'1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0',", "ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "{} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "root for # license information. # # Code generated by Microsoft (R) AutoRest", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations", "Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>`", "Credential needed for the client to connect to Azure. 
:type credential: ~azure.core.credentials.TokenCredential :param", "@property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` *", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return", "import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as", "@property def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\"", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version))", "else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return self def", "self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations import RunsOperations as OperationClass else: raise", "'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the", "api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass", "version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 
2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>`", "version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from", "ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version ==", "* 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version ==", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` *", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: *", "api_version == '2021-10-01': from .v2021_10_01.operations import 
BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview':", "ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview:", "from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "= self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else:", "have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance", "from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "api_version = self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations import RunsOperations as OperationClass", "from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "* 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if 
api_version ==", "{} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0:", "version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>`", "return models elif api_version == 'v1.0': from .runhistory import models return models elif", "ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return", "on the API version: * 1.0.0: 
:class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version", "'1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does", "group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on", "ComputeOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass", "ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "@property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` *", "\"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as", "JobsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise", "def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01:", "api_version = self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import Operations as OperationClass", "Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview:", "operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends", "\"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as", "ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass", "API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0':", "from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01':", "elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API", "@property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\"", "on the API version: * 2021-10-01: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>`", "not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self):", "to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the", "{} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric':", "def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview:", "ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass", "self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version", "on the API version: * 2021-10-01: 
:class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version", "API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01:", "self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import RunOperations as OperationClass else: raise", "dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version =", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API", "* 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations", "2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import", "elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API", ":param subscription_id: The ID of the target subscription. 
:type subscription_id: str :param api_version:", "== '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {}", "== '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "its API version. The api-version parameter sets the default API version if the", "* 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\"", "@property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\"", "config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>` *", "* 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" 
api_version = self._get_api_version('private_endpoint_connections') if api_version ==", "@property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\"", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview:", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif", "api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass", "\"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint')", "'v1.0': from .runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does", "== '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from", ".v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations", "the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: 
:class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` *", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version ==", "str :param profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles", "'2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "return models elif api_version == '1.0.0': from .model_dataplane import models return models elif", "2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if", "TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if", "sets the default API version if the operation group is not described in", "a particular api-version and/or profile. 
The profile sets a mapping between an operation", "api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from", "= self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif", "* 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers')", ".v2021_10_01.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations", "of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). 
By default,", "from .runhistory.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not", "raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config,", "not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self):", "from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return", "group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on", "@property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\"", "cause incorrect behavior and will be lost if the code is # regenerated.", "version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` *", "@property def delete(self): 
\"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` *", "'2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version", "group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on", "self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import EventsOperations as OperationClass else: raise", "else: raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client,", "str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles", "'1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0',", "{} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>`", "'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, 
credential,", "api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass", "from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as", "else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client,", "ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "reserved. # Licensed under the MIT License. See License.txt in the project root", "self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise", "group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on", "the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version ==", "ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'migration'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the", "class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support", "* 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\"", "2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import", "= self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else:", "def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version", ".v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: 
:class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>`", "== '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif", "depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01:", "on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>`", "Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>`", "from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the 
API", "operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends", "2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import", "'1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from .runhistory.operations", "implemetation of MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0:", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version", "self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise", "* 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations", "import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as", "raise ValueError(\"API version {} does not have operation group 
'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version: *", "from .registry_discovery import models return models elif api_version == 'v1.0': from .runhistory import", "on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version", "= ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version):", "the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version ==", "elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API", "api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: *", "ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does", "from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", 
".v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations", "'2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client,", ":param profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "\"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as", "* 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas')", ":class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations", ".v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: 
raise ValueError(\"API version {} does not have", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version))", "support current implemetation of MultiApiClientMixin.\" Will be removed in final version of multiapi", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version", "@property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\"", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version", "== 'v1.0': from .runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {}", "operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: * v1.0:", "1.0.0: 
:class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import", "== '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {}", "operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends", "version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>`", "ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config,", "{} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have 
operation group 'environment_versions'\".format(api_version))", "import models return models elif api_version == '1.0.0': from .model_dataplane import models return", ":class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from", "ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client,", "have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance", "2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01':", "not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self):", "+ \" latest\" ) def __init__( self, credential, # type: \"TokenCredential\" subscription_id, #", "== '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "API version. 
The api-version parameter sets the default API version if the operation", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "**kwargs): \"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\" Will", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else:", "{} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "is not described in the profile. :param credential: Credential needed for the client", "== '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {}", "ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "== '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>`", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif", "api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass", "import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from 
.v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as", "@property def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` *", "== '2021-10-01': from .v2021_10_01.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from", "= self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else:", "contains multiple API versions, to help you deal with all of the Azure", "api-version and/or profile. The profile sets a mapping between an operation group and", "'1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0',", "\"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\" Will be", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version ==", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return", "str :param base_url: Service URL :type base_url: str :param profile: A profile definition,", "raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config,", "'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans':", "== '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "'1.0.0': from .model_dataplane.operations import 
ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does", "base_url: str :param profile: A profile definition, from KnownProfiles to dict. :type profile:", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0:", "Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>`", "{} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'v1.0': from .runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return", "the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` *", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version", "== '1.5.0': from 
.dataset_dataplane.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {}", "'2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version))", "from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version ==", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return", "ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0':", "URL :type base_url: str :param profile: A profile definition, from KnownProfiles to dict.", "API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version", "is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG:", "version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version =", "default API version if the operation group is not described in the profile.", "from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview import models return models elif api_version ==", "\"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as", "in profile. 
:type api_version: str :param base_url: Service URL :type base_url: str :param", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from", "API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if", "v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on", "'2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self):", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0:", "and will be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from", "2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if", "= self._get_api_version('data_call') if api_version == '1.5.0': from .dataset_dataplane.operations import DataCallOperations as OperationClass else:", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API", "'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \"", "\"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import RunOperations as", "'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the", "API version: * 1.5.0: 
:class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0':", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version:", "* v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` *", "to help you deal with all of the Azure clouds (Azure Stack, Azure", ".model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "* 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\"", "= self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif", "from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise 
ValueError(\"API version {} does not", "does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01':", "for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The", "* 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version ==", "{} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version:", "elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API", "elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API", "DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from", "ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass", "if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version ==", "on the API version: * 1.0.0: 
:class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version", "operate on Azure Machine Learning Workspace resources. This ready contains multiple API versions,", "import Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs):", "from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not", "raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config,", "Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from", "'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the", "DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from", "VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version))", "have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance", 
"self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import Operations as OperationClass elif api_version", "import models return models elif api_version == '2021-10-01': from .v2021_10_01 import models return", "'1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does", "ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return", "DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION,", "import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version = self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass", "version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: *", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise", "raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config,", "the 
default API version if the operation group is not described in the", "== '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None:", "Operations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass", "== '1.5.0': from .dataset_dataplane import models return models elif api_version == '1.0.0': from", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version))", "'2022-02-01-preview': from .v2022_02_01_preview import models return models elif api_version == '2022-05-01': from .v2022_05_01", "all of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). By", "version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version:", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: 
:class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version))", "group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version", "= self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif", "'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the", "needed for the client to connect to Azure. 
:type credential: ~azure.core.credentials.TokenCredential :param subscription_id:", "= \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0',", ".v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models", "api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version", "group is not described in the profile. :param credential: Credential needed for the", ":mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from", "2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane import", "* 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations", "API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0':", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = 
self._get_api_version('data_version')", "group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on", "have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance", "ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as", ".runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version: *", "models return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models", "on the API version: * v1.0: 
:class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: *", "no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` *", "depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01:", "version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return", "version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>`", "* 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version ==", "MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version ==", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from", "= self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else:", ".v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from .model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version:", "else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client,", 
":mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>`", ".v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations", "@property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` *", "self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version", "version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "base_url: Service URL :type base_url: str :param profile: A profile definition, from KnownProfiles", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version))", "import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as", "if api_version == '1.5.0': from .dataset_dataplane import models return models elif api_version ==", "api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import 
CodeContainersOperations as OperationClass", "@property def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` *", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version))", "from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "from .v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import", "ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version", "import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as", "not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self):", "have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance", "* 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute')", "@property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` *", "elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else:", "ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass", ":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version", "Generator. 
# Changes may cause incorrect behavior and will be lost if the", "fake class to support current implemetation of MultiApiClientMixin.\" Will be removed in final", "v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview:", "self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise", "if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API", "* 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version ==", "\"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations as", "profile definition, from KnownProfiles to dict. 
:type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default", "BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass", "on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>`", "* 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations", "import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise", "@property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\"", "* 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version ==", "v for k, v in 
cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION):", "2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the", "operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return", "== '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {}", "LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview',", "ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)),", "* 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` *", "data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>`", "header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({", ".v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from .v2021_10_01 import models", "import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version", "'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the", "group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on", ":class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations", "2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview':", "from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not", "2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import", "version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version:", "does not have operation group 'migration'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "= self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import SpansOperations as OperationClass else:", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0:", "\"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments')", "api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k, v", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations", "'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else:", "== 'v1.0': from .runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {}", "Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: 
:class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>`", ":mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>`", "'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status':", "API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview':", "else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client,", "DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0',", "\"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version:", 
"api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>`", ".v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01':", "def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version", "'1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does", "api_version == '1.0.0': from .model_dataplane import models return models elif api_version == 'v1.0':", "the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version ==", "import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) 
return", "api_version = self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass", "not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self):", "== '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance", "have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance", "from .v2022_05_01 import models return models raise ValueError(\"API version {} is not available\".format(api_version))", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else:", "Operations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass", "ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version))", "PrivateLinkResourcesOperations as OperationClass 
elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` *", ":class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations", "api_version == '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API version", "if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version ==", "2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import", "depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if", "import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as", "\"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import Operations as", "import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "be removed in final version of multiapi azure-core based 
client \"\"\" pass class", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0:", "raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config,", "import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import QuotasOperations as", "version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version", "from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif", "* v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` *", "if api_version == '2020-09-01-dataplanepreview': from 
.v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API", "'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self,", "== '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return", "import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on", "* 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version ==", "api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version", "import RunOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>`", "does not have 
operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "target subscription. :type subscription_id: str :param api_version: API version to use if no", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return", "RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API", "environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>`", "have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance", "= self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else:", "elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API", "else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client,", "'2022-05-01': from .v2022_05_01.operations import 
OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does", "have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License.", "import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as", "latest API version available on public Azure. For production, you should stick to", "present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: {", "import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as", "ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does", "operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version:", "WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass", "UsagesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version: *", "self._get_api_version('datastores') if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version", "'2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif 
api_version == '2022-05-01': from .v2022_05_01.operations", "Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {}", "elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API", "have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance", "def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version))", "'2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does", "import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as", "api_version == '2021-10-01': from .v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview':", "operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends", "'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': 
'2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version':", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version ==", "on Azure Machine Learning Workspace resources. This ready contains multiple API versions, to", "on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>`", "* v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: * 1.5.0:", "KnownProfiles **kwargs # type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client =", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version:", "models elif api_version == 'v1.0': from .registry_discovery import models return models elif api_version", "the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = 
self._get_api_version('dataset_v2') if api_version ==", "subscription_id: str :param api_version: API version to use if no profile is provided,", "VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass", "from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version", "2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if", "api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview':", "import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {}", "type: \"TokenCredential\" subscription_id, # type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", # type:", "'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0',", "models elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import 
models return models elif api_version", "depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if", "if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API", "api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod", "MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based client \"\"\"", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete':", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else:", "have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01':", "model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>`", "to operate on Azure Machine Learning Workspace resources. This ready contains multiple API", "'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration':", "elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API", "on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>`", "operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return", "operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends", "on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version", "from .model_dataplane.operations import ModelsOperations as 
OperationClass else: raise ValueError(\"API version {} does not", "'1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0',", "import models return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version", "have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance", "from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer,", "return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance", "from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint:", "\"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to", "* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations", "if api_version == '2021-10-01': from .v2021_10_01.operations 
import Operations as OperationClass elif api_version ==", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API", "from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self):", "2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version:", "from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for LRO operations", "@property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` *", "api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass", "version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version =", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version", "= self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif", "2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01':", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 
'dataset_controller_v2'\".format(api_version))", "def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` *", "== '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {}", "@property def metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\"", "self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version", ".v2021_10_01.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations", "Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>`", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` *", "Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: *", ":class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version 
== 'v1.0': from .runhistory.operations import RunsOperations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version))", "def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview:", "def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass elif api_version ==", ":class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version", "depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01:", "== '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {}", "on the API version: * 2021-10-01: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>`", "* 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers')", "api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API", "profile. 
The profile sets a mapping between an operation group and its API", "api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API version", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine Learning", "Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This", "deal with all of the Azure clouds (Azure Stack, Azure Government, Azure China,", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API", "of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow", "'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs':", "str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential,", "raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config,", "the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` *", "have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance", "api_version == 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version", "if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API", "Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version))", "'2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does", "* 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations", "'2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: 
:class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version =", "operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends", "have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance", "the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version ==", "credential, # type: \"TokenCredential\" subscription_id, # type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\",", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from", "'2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "from .v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == 'v1.0': from .registry_discovery import models return models elif api_version == 'v1.0':", "api_version == 'v1.0': from .runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API version", "datasets_v1(self): 
\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version =", "version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from", "def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version", "== 'v1.0': from .runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from", "* 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations", "'v1.0': from .runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview", "API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01':", "depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if", "have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as 
OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self):", "* 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version ==", "**kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def", ":mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from .dataset_dataplane import models", "* v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations", "data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version =", ".v2022_05_01.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API", "Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API 
version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>`", "'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG +", "version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version: *", "API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version", "def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01:", "'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the", ".runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have", "# Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass else: raise", "data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version =", "self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise", "the project root for # license information. # # Code generated by Microsoft", "TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential", "version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from", "have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance", "= self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass else:", "* 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations", "from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataContainersOperations as OperationClass else:", 
"'2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the", "API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version", "@property def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` *", "api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version", "on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>`", "on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version =", "return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` *", "@property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` *", "data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version =", "'2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: *", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API", "._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional", "on the API version: * 2022-02-01-preview: 
:class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version =", "'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the", "operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends", "2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01':", ".v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations", "{} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else:", "models elif api_version == '2021-10-01': from .v2021_10_01 import models return models elif api_version", "ARMPipelineClient from 
azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import", "does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01':", "from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not", "batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version =", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version))", "if api_version == '2021-10-01': from .v2021_10_01.operations import DatastoresOperations as OperationClass elif api_version ==", ".v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateEndpointConnectionsOperations", "api_version = self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import MetricOperations as OperationClass", ".v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version =", "EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version =", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version:", "== '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>`", "2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') 
if", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif", "'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the", "raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config,", "* 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions')", "\"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\"", "= self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass else:", "version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from", "else: raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client,", "@property def data_versions(self): \"\"\"Instance depends 
on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` *", ":class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetVersionsOperations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return", "operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends", ".v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations", "'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace':", "v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from .runhistory.operations import", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API", "raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config,", "have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property 
def private_endpoint_connections(self): \"\"\"Instance", "BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2022-05-01': from .v2022_05_01 import models return models raise ValueError(\"API version {} is", "API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: *", "'1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does", "'2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "{} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "API version available on public Azure. 
For production, you should stick to a", "api_version = self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import EventsOperations as OperationClass", "if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version =", "'2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential, # type:", "else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client,", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from", "EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass", "2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import", "available on public Azure. For production, you should stick to a particular api-version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API", "and/or profile. 
The profile sets a mapping between an operation group and its", "not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self):", "delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>`", "\"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as", "def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API", "api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "operations if no Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG =", "depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if", "\"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from .runhistory.operations import MetricOperations as", "WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import ComputeOperations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API", ".v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", ":class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from", "have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance", "depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if", "import 
DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DataVersionsOperations as", "== '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version: API version to use if no profile is provided, or if missing", "'2022-05-01': from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does", "Will be removed in final version of multiapi azure-core based client \"\"\" pass", "stick to a particular api-version and/or profile. The profile sets a mapping between", "depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if", "api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version", "import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class", "version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from", "raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from .model_dataplane.operations import", "OperationClass else: raise ValueError(\"API 
version {} does not have operation group 'model_containers'\".format(api_version)) return", "on public Azure. For production, you should stick to a particular api-version and/or", "'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version: *", "import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version: * 1.0.0:", "* 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass elif", "== '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else:", "@property def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` *", "CodeContainersOperations as OperationClass elif api_version == 
'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "from .runhistory.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not", "= self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif", "2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview:", "# type: KnownProfiles **kwargs # type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs)", "api_version == '2022-05-01': from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version", "if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations as OperationClass else: raise ValueError(\"API", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: *", "OperationClass elif api_version == 
'2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version", "@property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\"", "Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>`", "self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version", ".dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations", "on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version", "the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version))", ".v2022_05_01.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01':", "else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return 
OperationClass(self._client,", "api_version == 'v1.0': from .runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API version", "= self._get_api_version('model_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif", "OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass", "def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version", "from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any,", "incorrect behavior and will be lost if the code is # regenerated. #", "self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise", ".v2022_05_01.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "Azure Machine Learning Workspace resources. 
This ready contains multiple API versions, to help", "\"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version:", "'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the", "version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version =", "raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config,", "polling_interval: Default waiting time between two polls for LRO operations if no Retry-After", "api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version == 'v1.0':", "Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>`", "import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "\"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations 
as", "import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as", "JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\"", ":class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from .v2021_10_01.operations import Operations", "'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the", "class to support current implemetation of MultiApiClientMixin.\" Will be removed in final version", "import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client,", "of the target subscription. :type subscription_id: str :param api_version: API version to use", "the target subscription. 
:type subscription_id: str :param api_version: API version to use if", "from .v2021_10_01.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version ==", "1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview:", "ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01':", "def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = 
self._get_api_version('batch_endpoints') if api_version", ".runhistory.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have", "@property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` *", "{} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config,", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: * v1.0:", "group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on", "Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>`", "_PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call':", "import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional 
from", "models elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif api_version", "the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\"", "version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version =", "dict. 
:type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls", "api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass", "'1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview',", "'2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview':", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version))", "from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration", "* v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations", "api_version == '2021-10-01': from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview':", "to use if no profile is provided, or if missing in profile. 
:type", "API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if", "operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self):", "import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API", "profile. 
:type api_version: str :param base_url: Service URL :type base_url: str :param profile:", "): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__(", ".v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentContainersOperations", "api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "* 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return", "API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version", "import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` *", "version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from", "-------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles,", "not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self):", ".v2022_05_01.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "of MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based client", "azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if", "# Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the MIT", "2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01':", "api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass", "the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version ==", "== '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {}", "* 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces')", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API", "have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance", "version {} does not have operation group 'delete'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "{} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "== '2022-01-01-preview': from .v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from", "from .v2022_05_01.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k, v in", "'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does", "self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0',", "{} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from", "or if missing in profile. 
:type api_version: str :param base_url: Service URL :type", "'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the", "API version if the operation group is not described in the profile. :param", ".v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations", "raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config,", "from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential, #", "the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\"", "group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__()", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version:", "import ModelVersionsOperations as OperationClass elif 
api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as", "def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version", "api_version == 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version", "Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any", "\"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` *", "models return models elif api_version == '1.0.0': from .model_dataplane import models return models", "* 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass elif api_version ==", "quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>`", "self._get_api_version('model_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version", "operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends", "does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version ==", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version:", "on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>`", "ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self):", "== '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from", "should stick to a particular api-version and/or profile. 
The profile sets a mapping", "version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else:", "from .runhistory.operations import RunOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview':", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass elif api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version:", ".v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "api_version == '2021-10-01-dataplanepreview': from 
.v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "'1.5.0': from .dataset_dataplane.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does", "to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two", "PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "def run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\" api_version", "models return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return models", "ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from", "allow end users to operate on Azure Machine Learning Workspace resources. 
This ready", "'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the", "* 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features')", "have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version ==", "self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version: * v1.0:", "2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import", "Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect", "import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API version {} does", "= self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01':", "@property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>` \"\"\"", "* 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages')", "def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API", "not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self):", "version {} does not have operation 
group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` *", "on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>`", "import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient", "import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as", "group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on", "on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>`", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as OperationClass else: raise", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations", ".v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from .v2022_02_01_preview import models", "{} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version", "group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on", ".v2021_10_01.operations 
import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchEndpointsOperations", "regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles", "from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", "client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate", "operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends", ":class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations", "from .runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not", "import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` *", "else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return 
OperationClass(self._client,", "= self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as OperationClass else:", "= self._get_api_version('workspace_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass elif", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif api_version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version:", "api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview':", "does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "\"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment':", "import UsagesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as", "else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client,", "import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "See License.txt in the project root for # license information. # # Code", "AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if", "disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def", "Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>`", "WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", ".v2021_10_01.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations", "2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01':", 
":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version", "version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version =", "Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>`", "def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version", "'1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0',", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version))", "def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version", "import QuotasOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import 
QuotasOperations as", "group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: *", "api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version", "else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client,", "api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass", "\"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs')", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return", "not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self):", "API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01:", "version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version =", "@property def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\"", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01':", "depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview:", "version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>`", "workspaces(self): \"\"\"Instance depends on the 
API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>`", "does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: *", "API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0':", ".v2021_10_01.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass else:", "\"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from .runhistory.operations import ExperimentsOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version:", "if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version ==", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif", "= self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import 
ModelsOperations as OperationClass else:", "api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01':", "API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0':", "on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version", "API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01:", "ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations import", "have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance", "else: raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client,", "return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API", "api_version == '2022-05-01': from .v2022_05_01 import models return models raise ValueError(\"API version {}", "msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports", "version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "* 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version ==", "'2022-05-01': from .v2022_05_01 import models return models raise ValueError(\"API version {} is not", "version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>`", "the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version ==", "does not have operation group 
'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self):", "have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version))", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version", "version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version", "* 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\"", "code_containers(self): \"\"\"Instance depends on the 
API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>`", "OperationClass elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else: raise", "on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version", "@property def run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>` \"\"\"", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version:", "\"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from .dataset_dataplane.operations import GetOperationStatusOperations as", "the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\"", "2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import", "1.5.0: 
:class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version))", "api_version == '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version", "not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self):", "does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>`", ".v2021_10_01.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations", "api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version", "api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version", "# type: \"TokenCredential\" subscription_id, # type: str api_version=None, # type: Optional[str] base_url=\"https://management.azure.com\", 
#", "'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments':", "from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version = self._get_api_version('run') if api_version == 'v1.0': from .runhistory.operations import RunOperations as OperationClass", "self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as OperationClass elif api_version", "import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version ==", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version", "import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version = self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass", "base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any ):", "on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version", "* 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\"", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif", "raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config,", "{} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on", "on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version", "api_version = self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass", "from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have 
operation group", "from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations", "Workspace resources. This ready contains multiple API versions, to help you deal with", "not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self):", "\"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans')", "ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass", "version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from", "version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from", "if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "if api_version == '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API", "All rights reserved. 
# Licensed under the MIT License. See License.txt in the", "azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake", "api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01':", "ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass", "Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>`", "api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version", "@property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` *", "ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self):", "operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends", "* 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = 
self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations", "* 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers')", "ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version))", "The ID of the target subscription. 
:type subscription_id: str :param api_version: API version", "does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on", "\"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as", "raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: *", "license information. # # Code generated by Microsoft (R) AutoRest Code Generator. 
#", "version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "* 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints')", "WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if", "not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self):", "'quotas'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the", ".v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations", "the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version))", "import DeleteOperations as OperationClass elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations as", "depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview:", "from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not", "version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "== '2022-02-01-preview': from .v2022_02_01_preview import models return models elif api_version == '2022-05-01': from", "'2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does", "version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials", "does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does", "elif api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif api_version ==", "{k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls,", "operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends", "== '1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {}", "VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass else: raise", "'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\"", ".v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentVersionsOperations", "the 
API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` *", "from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "= self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations as OperationClass elif", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return", "version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "* v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from .registry_discovery.operations", "DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "import Operations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import Operations as", "'2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else: 
raise ValueError(\"API version {} does", "ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'2022-02-01-preview': from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "== '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "= self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else:", "import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version:", "== '2021-10-01': from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from", "from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not", "= self._get_api_version('delete') if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif", "if api_version == '1.5.0': from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version ==", "@property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\"", 
"'2021-10-01': from .v2021_10_01.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations", "operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def __enter__(self): self._client.__enter__() return self def __exit__(self, *exc_details):", "'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references':", "does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: *", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on 
the API version:", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass elif", "DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from", "def events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version", ":class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import SpansOperations", ".registry_discovery import models return models elif api_version == 'v1.0': from .runhistory import models", "depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if", "\"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment')", "if api_version == 'v1.0': from .runhistory.operations import EventsOperations as OperationClass else: raise ValueError(\"API", "DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a", "* 2022-05-01: 
:class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from .v2021_10_01.operations", "import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0:", "2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>` \"\"\" api_version", ":mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>`", "* 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>` * 2022-05-01: 
:class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version ==", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentContainersOperations", "def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>` \"\"\" api_version", "elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API", "ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01:", "operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends", "== '1.0.0': from .model_dataplane.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {}", "'2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version == 
'2022-02-01-preview': from .v2022_02_01_preview.operations", "{} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>` \"\"\" api_version", "import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\"", "Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>`", "import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from", "= self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif", "# 
regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from", "does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API", "does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass elif", "profile is provided, or if missing in profile. 
:type api_version: str :param base_url:", "\"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeVersionsOperations as", ":class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from", "2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from", "== '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {}", "group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on", "2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview':", "API version: * 1.0.0: 
:class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0':", "if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else:", "from .dataset_dataplane.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not", ":param credential: Credential needed for the client to connect to Azure. :type credential:", "self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass elif api_version", ".v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentContainersOperations", "group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on", "operation group and its API version. 
The api-version parameter sets the default API", "def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview:", "from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on", ".v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not have", "{} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else:", "== '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {}", "have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance", "= self._get_api_version('events') if api_version == 'v1.0': from .runhistory.operations import EventsOperations as OperationClass else:", "dataset_containers(self): 
\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version =", "depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>` * 2022-02-01-preview:", "== '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as OperationClass else: raise", "API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>` * 2022-05-01:", "not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self):", "group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on", "API version: * v1.0: 
:class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0':", "else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: *", "azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer", "elif api_version == 'v1.0': from .registry_discovery import models return models elif api_version ==", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API", "API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01':", ".v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations", "self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass elif api_version", "== '1.5.0': from .dataset_dataplane.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {}", "EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass", 
"else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client,", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>` \"\"\" api_version", "import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as", "_SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine Learning Workspace", ".v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "etc.). By default, it uses the latest API version available on public Azure.", "have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance", "This ready contains multiple API versions, to help you deal with all of", "Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>`", "== '2022-05-01': from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {}", "profile=KnownProfiles.default, # type: KnownProfiles **kwargs # type: Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id,", "online_deployments(self): \"\"\"Instance 
depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>`", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "{} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return", "def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API", "DeleteOperations as OperationClass elif api_version == 'v1.0': from .runhistory.operations import DeleteOperations as OperationClass", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: * v1.0:", "* 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version ==", 
"2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import", "typing import Any, Optional from azure.core.credentials import TokenCredential class _SDKClient(object): def __init__(self, *args,", "runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>` \"\"\" api_version =", "ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'2021-10-01': from .v2021_10_01.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations", "depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if", "'2022-02-01-preview': from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "will be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing", "does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "== '2022-05-01': from .v2022_05_01.operations import Operations as OperationClass else: raise ValueError(\"API version {}", "self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return", "the latest API version available on public Azure. For production, you should stick", "operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends", "the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\"", "(Azure Stack, Azure Government, Azure China, etc.). 
By default, it uses the latest", "'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the", "def __init__( self, credential, # type: \"TokenCredential\" subscription_id, # type: str api_version=None, #", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations import CodeContainersOperations", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeVersionsOperations as OperationClass else: raise", ".dataset_dataplane.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have", "@property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>` *", ".v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import JobsOperations as OperationClass elif", "if no Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\"", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: *", "batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>`", "from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from", "Licensed under the MIT License. See License.txt in the project root for #", "the operation group is not described in the profile. :param credential: Credential needed", "for # license information. 
# # Code generated by Microsoft (R) AutoRest Code", "else: raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client,", "API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0':", "parameter sets the default API version if the operation group is not described", ":class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations", "* 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from .dataset_dataplane.operations", "@property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\"", "= self._get_api_version('run_artifacts') if api_version == 'v1.0': from .runhistory.operations import RunArtifactsOperations as OperationClass else:", ".v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>`", "from 
.v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not", "not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self):", "import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import CodeContainersOperations as", "on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>`", "operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends", "from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the", ".v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import BatchDeploymentsOperations", "not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def events(self):", "'v1.0': from .runhistory.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: *", "2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import", "api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass", "the MIT License. See License.txt in the project root for # license information.", "if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API", "elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version ==", "from .v2022_05_01.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not", "azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users", "MIT License. See License.txt in the project root for # license information. 
#", "\"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as", "import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: *", "2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if", "in final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient):", "the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` *", "return models elif api_version == 'v1.0': from .registry_discovery import models return models elif", "'2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0',", "1.0.0: 
:class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from .model_dataplane.operations import", "ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass elif api_version", "* 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions')", "self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations import SpansOperations as OperationClass else: raise", "elif api_version == '2022-05-01': from .v2022_05_01 import models return models raise ValueError(\"API version", "import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelContainersOperations as", "for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module", "from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time", "Azure. For production, you should stick to a particular api-version and/or profile. 
The", "operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends", "elif api_version == 'v1.0': from .runhistory import models return models elif api_version ==", "'1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does", "on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version", "'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the", "from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "@classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>`", "as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import Operations as OperationClass elif", "AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version:", "current implemetation of MultiApiClientMixin.\" Will be 
removed in final version of multiapi azure-core", "in the profile. :param credential: Credential needed for the client to connect to", "else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client,", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import DatastoresOperations as OperationClass else: raise", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass else: raise", "API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview':", ".v2022_01_01_preview.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations", "else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client,", "'2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models':", "api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from .v2021_10_01.operations import JobsOperations as OperationClass", "operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends", ".v2022_05_01.operations import DataVersionsOperations as OperationClass else: 
raise ValueError(\"API version {} does not have", "version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions')", "WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass", "elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import UsagesOperations as OperationClass elif api_version ==", "'2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations", "import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ComponentVersionsOperations as", ") def __init__( self, credential, # type: \"TokenCredential\" subscription_id, # type: str api_version=None,", "api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass", "have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
data_version(self): \"\"\"Instance", "from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import", "'2022-05-01': from .v2022_05_01.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does", "'2022-02-01-preview': from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "from .runhistory.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2021-10-01': from .v2021_10_01.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetV2Operations as OperationClass", "ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>` *", "the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version ==", "raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config,", "not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property 
def dataset_versions(self):", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API", "OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass else: raise", "code is # regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import", "the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version ==", "run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>` \"\"\" api_version =", "2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version", "{} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "== '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from", "type)} @classmethod 
def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0:", "api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version", "import models return models elif api_version == 'v1.0': from .registry_discovery import models return", "'2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does", "API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0':", "ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview:", "'2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0:", "\"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
data_version(self): \"\"\"Instance depends on the API version:", "elif api_version == '2022-05-01': from .v2022_05_01.operations import ComputeOperations as OperationClass else: raise ValueError(\"API", "self._get_api_version('datasets_v1') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass else: raise", "group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on", "None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container':", "Any ): self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces,", ":class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from .model_dataplane.operations import AssetsOperations", "# type: Optional[str] base_url=\"https://management.azure.com\", # type: str profile=KnownProfiles.default, # type: KnownProfiles **kwargs #", "raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config,", "elif api_version == '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API", "version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", 
"'1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does", "Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>`", "version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the", "public Azure. For production, you should stick to a particular api-version and/or profile.", ":type api_version: str :param base_url: Service URL :type base_url: str :param profile: A", "does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: *", "'1.0.0': from .model_dataplane.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does", "== '2022-05-01': from .v2022_05_01.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {}", "* 2022-05-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from .v2021_10_01.operations", "TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import", "= self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import DataContainerOperations as OperationClass else:", "not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self):", "OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass", "2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import", "from .v2021_10_01.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass elif api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version))", "depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if", "else: raise ValueError(\"API version {} does not have operation group 
'data_versions'\".format(api_version)) return OperationClass(self._client,", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API", "ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>`", "if api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` *", "* 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from .v2021_10_01.operations", "have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance", "does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "the API version: * 2021-10-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>` *", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>` \"\"\" api_version =", ":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from .v2021_10_01.operations import UsagesOperations", "\"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from .model_dataplane.operations import MigrationOperations as", "depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if", "group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on", "== 'v1.0': from .registry_discovery.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {}", 
"OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: *", "def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>` \"\"\" api_version", "does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0',", "{} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from .v2021_10_01.operations import", "\"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>` \"\"\" api_version =", "self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif api_version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview:", "{} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "self._get_api_version('component_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass elif api_version", "'2020-09-01-dataplanepreview': from .v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from .v2021_10_01", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version))", "else: raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client,", "* v1.0: 
:class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from .runhistory.operations", "EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass", "API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>` \"\"\" api_version", "== '2022-05-01': from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {}", "{} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version,", "'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the", "version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations 
import EnvironmentVersionsOperations as OperationClass", ".dataset_dataplane.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from .dataset_dataplane.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from .runhistory.operations import", "* 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version ==", "group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on", "based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to", "api_version == '2021-10-01-dataplanepreview': from .v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview':", "as OperationClass elif api_version == '2022-02-01-preview': from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass elif", "group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on", "api_version == '2021-10-01': from .v2021_10_01.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", ") @classmethod def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items()", "= 
self._get_api_version('batch_deployments') if api_version == '2021-10-01': from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass elif", "== '2022-02-01-preview': from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior", "'2022-02-01-preview': from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations", "1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from .dataset_dataplane.operations import", "version. The api-version parameter sets the default API version if the operation group", "depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01:", "* 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from .v2021_10_01.operations", "to a particular api-version and/or profile. 
The profile sets a mapping between an", "import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as", "else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client,", ":class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from .dataset_dataplane.operations import DatasetControllerV2Operations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version))", "api_version == '2021-10-01': from .v2021_10_01.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version", ".v2022_02_01_preview.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import JobsOperations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: *", "version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "== '2021-10-01': from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'2022-02-01-preview': from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass elif api_version == 
'2022-05-01': from .v2022_05_01.operations", "{} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspaceFeaturesOperations", "does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "= self._get_api_version('online_endpoints') if api_version == '2021-10-01': from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass elif", "else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client,", "QuotasOperations as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import QuotasOperations as OperationClass", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>` \"\"\" api_version = 
self._get_api_version('component_containers') if api_version", "version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from", ".v2022_05_01.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have", "= ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint':", "if the operation group is not described in the profile. :param credential: Credential", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: *", "self._get_api_version('workspaces') if api_version == '2021-10-01': from .v2021_10_01.operations import WorkspacesOperations as OperationClass elif api_version", "OperationClass elif api_version == '2022-01-01-preview': from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass elif api_version", "have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) def close(self): self._client.close() def", "api_version == '2022-02-01-preview': from .v2022_02_01_preview import models return models elif api_version == '2022-05-01':", "not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self):", "does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from .v2021_10_01.operations import EnvironmentVersionsOperations", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "as OperationClass elif api_version == '2022-05-01': from .v2022_05_01.operations import ModelVersionsOperations as OperationClass else:" ]
[ "name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account,", "self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account, password): r = self.winney.login(json={\"account\":", "password): r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def get_user(self, user_id):", "= self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\": uc = UserCenter() uc.login(\"hello\",", "= Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\",", "r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def get_user(self, user_id): r", "Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\",", "self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock())", "self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None)", "addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\",", "import Address from winney import Winney, retry from 
winney.mock import Mock class UserMock(Mock):", "from winney.mock import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def", "import Winney, retry from winney.mock import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"}", "uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account, password):", "mock_data=UserMock()) @retry def login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\": password}) return", "login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def", "\"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000,", "class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr])", "<filename>test.py from winney.winney import Address from winney import Winney, retry from winney.mock import", "__init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self):", "return r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if", "self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\":", "\"password\": password}) return r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return", "user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\": uc =", "winney.winney import Address from winney import Winney, retry from 
winney.mock import Mock class", "name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account, password): r = self.winney.login(json={\"account\": account,", "mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account, password): r", "def login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry", "winney import Winney, retry from winney.mock import Mock class UserMock(Mock): data = {\"name\":", "UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions()", "self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\": uc = UserCenter() uc.login(\"hello\", \"123456\")", "mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self, account, password): r =", "= {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney =", "account, \"password\": password}) return r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id})", "UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000)", "Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\",", "= self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def get_user(self, user_id): r =", "uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def 
login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\":", "addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True,", "def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\":", "self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def login(self,", "class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\",", "Winney, retry from winney.mock import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class", "@retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ ==", "Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr =", "init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry def", "winney.mock import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self):", "retry from winney.mock import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object):", "port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False,", "from winney.winney import Address from winney import Winney, retry from winney.mock 
import Mock", "r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\": uc = UserCenter()", "{\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\",", "= Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\",", "port=5000, addrs=[addr]) self.init_functions() def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\",", "mock=True, mock_data=UserMock()) @retry def login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\": password})", "account, password): r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json() @retry def get_user(self,", "password}) return r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json()", "Address from winney import Winney, retry from winney.mock import Mock class UserMock(Mock): data", "def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney = Winney(host=\"localhost\", port=5000, addrs=[addr]) self.init_functions() def", "def init_functions(self): self.winney.register(method=\"post\", name=\"login\", uri=\"/api/login\", mock=False, mock_data=None) self.winney.register(method=\"get\", name=\"get_user\", uri=\"/api/user\", mock=True, mock_data=UserMock()) @retry", "@retry def login(self, account, password): r = self.winney.login(json={\"account\": account, \"password\": password}) return r.json()", "import Mock class UserMock(Mock): data = {\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr", "data = 
{\"name\": \"olivetree\"} class UserCenter(object): def __init__(self): addr = Address(host=\"localhost\", port=5000) self.winney", "r.json() @retry def get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__", "from winney import Winney, retry from winney.mock import Mock class UserMock(Mock): data =", "get_user(self, user_id): r = self.winney.get_user(data={\"user_id\": user_id}) return r.json() if __name__ == \"__main__\": uc" ]
[ "(width_bits <= 64): width_bytes = 8 else: width_bytes = 1 dimension = (width_bits", ".bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and", "wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of", "The C/BSV code contains: Struct defs for each struct, where each field has", "= 8 else: width_bytes = 1 dimension = (width_bits + 7) // 8", "[N] # have dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out =", "0 elif (width_bits <= 8): width_bytes = 1 elif (width_bits <= 16): width_bytes", "(only works if both # this Python executable and spec_filename.py are in the", "name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for transmission and C structs", "field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes +=", "ok for a field-width to be 0 (e.g., unused 'user' field in an", "Functions for C application code to dequeue each type of receive-struct from a", "struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out", "of receive-struct from a pending queue A function for the C application code", "['field_name'] width_bits = f ['width_bits'] width_bytes = 0 dimension = 1; if (width_bits", "to be 0 (e.g., unused 'user' field in an AXI channel). 
Generates three", "for details # ================================================================ import sys import os import stat import importlib import", "(spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary", "+ argv [0] + \" <spec_file.py>\" + ''' <spec_file.py> should be a Python", "<= 32): width_bytes = 4 elif (width_bits <= 64): width_bytes = 8 else:", "stat import importlib import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import *", "field spec has attributes 'field_name' and 'width_bits' # In struct_spec_out, we add attributes", "to dequeue each type of receive-struct from a pending queue A function for", "defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two are lists of 'struct", "This dynamic import of the spec_filename spec file is fragile (only works if", "{ 'field_name' : 'fieldfoo', 'width_bits': width }, ... { 'field_name' : 'fieldfoo', 'width_bits':", "send-struct into a bytevec ready for transmission A function for the C application", "]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' :", "See README for details # ================================================================ import sys import os import stat import", "2020 <NAME> # See README for details # ================================================================ import sys import os", "fragile (only works if both # this Python executable and spec_filename.py are in", "max ([ s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len'", "if ((len (argv) != 2) or (argv [1] == \"-h\") or (argv [1]", "<spec_file.py> should be a Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name", "received bytevec into a queued receive-struct ''' # ================================================================ def main (argv =", "Bit #(w) where w 
is the specified bit-width C: uint8_t, uint16_t, uint32_t or", "\"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width }, ... {", "== 0): width_bytes = 0 elif (width_bits <= 8): width_bytes = 1 elif", ": 'fieldfoo', 'width_bits': width }, ... { 'field_name' : 'fieldfoo', 'width_bits': width }", "etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import", "in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits = f ['width_bits'] width_bytes =", ": max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes,", "s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1,", "} # Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes)", "The first two are lists of 'struct specs', each of which has the", "f ['field_name'] width_bits = f ['width_bits'] width_bytes = 0 dimension = 1; if", "[1] == \"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0", "'width_bits': width } ]} Struct names should be globally unique. Field names should", "for C application code to enqueue each type of send-struct into a pending", "if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: # Warning: # This dynamic", "0 # ================================================================ # This is a struct spec -> struct spec function", "(struct_spec_in): fields_out = [] size_bytes = 0 for f in struct_spec_in ['fields']: field_name", "(mkHelp_text (argv)) return 0 spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename =", "[ { 'field_name' : 'fieldfoo', 'width_bits': width }, ... 
{ 'field_name' : 'fieldfoo',", "except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec", "''' # ================================================================ def main (argv = None): if ((len (argv) != 2)", "1,2,4 or 8 # Larger fields are represented in C as uint8_t [N]", "'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes", "spec function # In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits'", "each struct, where each field has type: BSV: Bit #(w) where w is", "'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet formats and C", "max ([ s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len'", "first two are lists of 'struct specs', each of which has the following", "s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' : len", "where there is some notion of 'finding' from a path etc. spec =", "mkHelp_text (argv): return \"Usage: \" + argv [0] + \" <spec_file.py>\" + '''", "if width <= 64 bits, uint8_t [..] if wider A 'state' struct containing", "parts of a packet: C to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes']", "1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV (spec_filename, package_name,", "C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1,", "'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct names should be globally unique.", "both # this Python executable and spec_filename.py are in the current dir. 
#", "a packet: C to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s", "to encode an already queued send-struct into a bytevec ready for transmission A", "is ok for a field-width to be 0 (e.g., unused 'user' field in", "'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for", "max_C_to_BSV_struct_bytes } # Data structure for different parts of a packet: BSV to", "width }, ... { 'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct names", "a path etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable", "else: width_bytes = 1 dimension = (width_bits + 7) // 8 field_out =", "[compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data structure for different parts of", "function for the C application code to decode a received bytevec into a", "field_out = {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension}", "A 'state' struct containing queues and communication 'credits' for each struct type, Functions", "queued receive-struct ''' # ================================================================ def main (argv = None): if ((len (argv)", "= 1 dimension = (width_bits + 7) // 8 field_out = {'field_name' :", "pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage: \" + argv [0] +", "'field_name' and 'width_bits' # In struct_spec_out, we add attributes 'width_bytes' and 'dimension' #", "64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes", "if (width_bits == 0): width_bytes = 0 elif (width_bits <= 8): width_bytes =", "size_bytes} return struct_spec_out # ================================================================ # For non-interactive invocations, call main() and use", "Functions for C application code to enqueue each type of send-struct into a", "as 
uint8_t [N] # have dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in):", "= 1; if (width_bits == 0): width_bytes = 0 elif (width_bits <= 8):", "extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet formats", "spec.BSV_to_C_structs] # Data structure for different parts of a packet: C to BSV", "32): width_bytes = 4 elif (width_bits <= 64): width_bytes = 8 else: width_bytes", "C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id'", "elif (width_bits <= 16): width_bytes = 2 elif (width_bits <= 32): width_bytes =", ": 'fieldfoo', 'width_bits': width } ]} Struct names should be globally unique. Field", "'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width }, ... { 'field_name'", "Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================ def", "'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out = {'struct_name':", "'width_bits': width }, ... 
{ 'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct", "(spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit", "sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format", ": len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the", "# See README for details # ================================================================ import sys import os import stat", "================================================================ import sys import os import stat import importlib import pprint from Gen_Bytevec_Mux_BSV", "(package_name)) # Compute all necessary byte-widths for transmission and C structs # Each", "for packet formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in", "spec_filename = spec_filename [:-3] try: # Warning: # This dynamic import of the", "add struct attribute 'size_bytes' for total # of bytes # Fields <= 64b", "f ['width_bits'] width_bytes = 0 dimension = 1; if (width_bits == 0): width_bytes", "should be a Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The", "fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================ # For non-interactive invocations, call", "'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate", "notion of 'finding' from a path etc. 
spec = importlib.import_module (spec_filename) # (\"type_specs\")", "for different parts of a packet: C to BSV max_C_to_BSV_struct_bytes = max ([", "to enqueue each type of send-struct into a pending queue Functions for C", "package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This is a", "+ ''' <spec_file.py> should be a Python source file defining three variables: C_to_BSV_structs", "compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0 for f in struct_spec_in ['fields']:", "or 8 # Larger fields are represented in C as uint8_t [N] #", "= {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append", "# and we add struct attribute 'size_bytes' for total # of bytes #", "width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0 for f", "(uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of 1,2,4 or 8 # Larger", "a struct spec -> struct spec function # In struct_spec_in, each field spec", "Study importlib examples where there is some notion of 'finding' from a path", "8 # Larger fields are represented in C as uint8_t [N] # have", "return 0 spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3]", "-B # Copyright (c) 2020 <NAME> # See README for details # ================================================================", "imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute", "each type of receive-struct from a pending queue A function for the C", "<= 16): width_bytes = 2 elif (width_bits <= 32): width_bytes = 4 elif", "'fieldfoo', 'width_bits': width }, ... 
{ 'field_name' : 'fieldfoo', 'width_bits': width } ]}", "where w is the specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as", "has the following form: { 'struct_name': \"Foo\", 'fields' : [ { 'field_name' :", "* pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage: \" +", "from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return", "for different parts of a packet: BSV to C max_BSV_to_C_struct_bytes = max ([", "8 field_out = {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' :", "in spec.BSV_to_C_structs] # Data structure for different parts of a packet: C to", "each struct type, Functions for C application code to enqueue each type of", "(argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv [1]", "Struct names should be globally unique. Field names should be unique within a", "in an AXI channel). 
Generates three output files: package_name.bsv package_name.h package_name.c The C/BSV", "encode an already queued send-struct into a bytevec ready for transmission A function", "width_bytes = 0 dimension = 1; if (width_bits == 0): width_bytes = 0", ": width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes *", "and C structs # Each of the 'field' structs extends with 'width_bytes' and", "width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out =", "importlib examples where there is some notion of 'finding' from a path etc.", "C_to_BSV_structs BSV_to_C_structs package_name The first two are lists of 'struct specs', each of", "'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs,", "= spec_filename [:-3] try: # Warning: # This dynamic import of the spec_filename", "'struct specs', each of which has the following form: { 'struct_name': \"Foo\", 'fields'", "a queued receive-struct ''' # ================================================================ def main (argv = None): if ((len", "= None): if ((len (argv) != 2) or (argv [1] == \"-h\") or", "spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for transmission", "= [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s", "an already queued send-struct into a bytevec ready for transmission A function for", "max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes =", "module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name =", "# In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits' # In", "width_bytes = 0 elif (width_bits <= 8): width_bytes = 
1 elif (width_bits <=", "package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C (spec_filename,", "'{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all", "BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1,", "spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data structure for", "importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename))", "import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name", "files: package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct defs for each struct,", "output files: package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct defs for each", "================================================================ def mkHelp_text (argv): return \"Usage: \" + argv [0] + \" <spec_file.py>\"", "package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct defs for each struct, where", "fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of 1,2,4", "unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format", "+ 7) // 8 field_out = {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes':", "(argv [1] == \"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return", "(argv) != 2) or (argv [1] == \"-h\") or (argv [1] == \"--help\")):", "have dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes", "all necessary byte-widths for packet formats and 
C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s)", "================================================================ def main (argv = None): if ((len (argv) != 2) or (argv", "packet: C to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in", ".c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================", "0 (e.g., unused 'user' field in an AXI channel). Generates three output files:", "sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for transmission and", "queued send-struct into a bytevec ready for transmission A function for the C", "import importlib import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp", "1 and width_bytes of 1,2,4 or 8 # Larger fields are represented in", "struct spec -> struct spec function # In struct_spec_in, each field spec has", "s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1,", "8 else: width_bytes = 1 dimension = (width_bits + 7) // 8 field_out", "<spec_file.py>\" + ''' <spec_file.py> should be a Python source file defining three variables:", "different parts of a packet: BSV to C max_BSV_to_C_struct_bytes = max ([ s", "of 1,2,4 or 8 # Larger fields are represented in C as uint8_t", "} # Data structure for different parts of a packet: BSV to C", "file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c", "['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================ # For", "# ================================================================ def main (argv = None): if ((len (argv) != 2) or", "= [] size_bytes = 0 for f in struct_spec_in ['fields']: field_name = f", 
"application code to dequeue each type of receive-struct from a pending queue A", "1 elif (width_bits <= 16): width_bytes = 2 elif (width_bits <= 32): width_bytes", "None): if ((len (argv) != 2) or (argv [1] == \"-h\") or (argv", "C as uint8_t [N] # have dimension N and width_bytes 1 def compute_width_bytes", "be globally unique. Field names should be unique within a struct. It is", "{'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================", "each field spec has attributes 'field_name' and 'width_bits' # In struct_spec_out, we add", "BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs,", "non-interactive invocations, call main() and use its return value # as the exit", "'size_bytes' for total # of bytes # Fields <= 64b wide, fit in", "Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files", "#(w) where w is the specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t,", "application code to encode an already queued send-struct into a bytevec ready for", "package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths", "width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension", "a received bytevec into a queued receive-struct ''' # ================================================================ def main (argv", "= {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out #", "dimension = 1; if (width_bits == 0): width_bytes = 0 elif (width_bits <=", "([ s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' :", 
"\" + argv [0] + \" <spec_file.py>\" + ''' <spec_file.py> should be a", "for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs] #", "# In struct_spec_out, we add attributes 'width_bytes' and 'dimension' # and we add", "'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } # Data", "Field names should be unique within a struct. It is ok for a", "sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package", "width_bits = f ['width_bits'] width_bytes = 0 dimension = 1; if (width_bits ==", "'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out", "width_bytes = 8 else: width_bytes = 1 dimension = (width_bits + 7) //", "communication 'credits' for each struct type, Functions for C application code to enqueue", "(spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write", "(\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name))", "with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet formats and", "spec_filename.py are in the current dir. 
# Study importlib examples where there is", "'width_bytes' and 'dimension' # and we add struct attribute 'size_bytes' for total #", "and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0 for", "BSV: Bit #(w) where w is the specified bit-width C: uint8_t, uint16_t, uint32_t", "BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id'", "w is the specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate,", "return value # as the exit code. if __name__ == '__main__': sys.exit (main", "specs', each of which has the following form: { 'struct_name': \"Foo\", 'fields' :", "lists of 'struct specs', each of which has the following form: { 'struct_name':", "should be globally unique. Field names should be unique within a struct. It", ": 1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure for different parts of", "fields_out = [] size_bytes = 0 for f in struct_spec_in ['fields']: field_name =", "dimension = (width_bits + 7) // 8 field_out = {'field_name' : field_name, 'width_bits'", "or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv", "================================================================ # For non-interactive invocations, call main() and use its return value #", "uint64_t, as appropriate, if width <= 64 bits, uint8_t [..] if wider A", "we add struct attribute 'size_bytes' for total # of bytes # Fields <=", "return \"Usage: \" + argv [0] + \" <spec_file.py>\" + ''' <spec_file.py> should", "[..] 
if wider A 'state' struct containing queues and communication 'credits' for each", "Data structure for different parts of a packet: C to BSV max_C_to_BSV_struct_bytes =", "package_name.h package_name.c The C/BSV code contains: Struct defs for each struct, where each", "Struct defs for each struct, where each field has type: BSV: Bit #(w)", "send-struct into a pending queue Functions for C application code to dequeue each", "uint8_t [..] if wider A 'state' struct containing queues and communication 'credits' for", "into a bytevec ready for transmission A function for the C application code", "In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits' # In struct_spec_out,", "s in spec.BSV_to_C_structs] # Data structure for different parts of a packet: C", "}, ... { 'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct names should", "type of receive-struct from a pending queue A function for the C application", "of bytes # Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) #", "code to enqueue each type of send-struct into a pending queue Functions for", "(BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure for different", "formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs", "elif (width_bits <= 8): width_bytes = 1 elif (width_bits <= 16): width_bytes =", "unique within a struct. 
It is ok for a field-width to be 0", "C application code to dequeue each type of receive-struct from a pending queue", "[1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv [1] if", "# have dimension 1 and width_bytes of 1,2,4 or 8 # Larger fields", "try: # Warning: # This dynamic import of the spec_filename spec file is", "(width_bits <= 8): width_bytes = 1 elif (width_bits <= 16): width_bytes = 2", "the specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width", "C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs,", "and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 #", "a Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two", "= f ['width_bits'] width_bytes = 0 dimension = 1; if (width_bits == 0):", "three output files: package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct defs for", "transmission A function for the C application code to decode a received bytevec", "= spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for", "width <= 64 bits, uint8_t [..] 
if wider A 'state' struct containing queues", "{ 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload' :", "of a packet: BSV to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for", "main (argv = None): if ((len (argv) != 2) or (argv [1] ==", "Data structure for different parts of a packet: BSV to C max_BSV_to_C_struct_bytes =", "([ s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' :", "'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================ # For non-interactive", "of 'struct specs', each of which has the following form: { 'struct_name': \"Foo\",", "for transmission and C structs # Each of the 'field' structs extends with", "in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data structure", "for the C application code to decode a received bytevec into a queued", "# For non-interactive invocations, call main() and use its return value # as", "Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage:", "len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure for", "in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs),", "package_name The first two are lists of 'struct specs', each of which has", "= max ([ s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = {", "the 'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths", "(C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv file", "details # ================================================================ import sys import os import stat import importlib import pprint", "or (argv [1] == 
\"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv))", "Python executable and spec_filename.py are in the current dir. # Study importlib examples", "be unique within a struct. It is ok for a field-width to be", "unused 'user' field in an AXI channel). Generates three output files: package_name.bsv package_name.h", "of the spec_filename spec file is fragile (only works if both # this", "<= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and", "C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for", "\" <spec_file.py>\" + ''' <spec_file.py> should be a Python source file defining three", "the following form: { 'struct_name': \"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo',", "type: BSV: Bit #(w) where w is the specified bit-width C: uint8_t, uint16_t,", "width_bytes = 4 elif (width_bits <= 64): width_bytes = 8 else: width_bytes =", "1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } #", "'user' field in an AXI channel). 
Generates three output files: package_name.bsv package_name.h package_name.c", "type, Functions for C application code to enqueue each type of send-struct into", "'state' struct containing queues and communication 'credits' for each struct type, Functions for", "max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes =", "C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64 bits,", "the spec_filename spec file is fragile (only works if both # this Python", ": 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes }", ": 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV (spec_filename,", "we add attributes 'width_bytes' and 'dimension' # and we add struct attribute 'size_bytes'", "to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs ])", ": size_bytes} return struct_spec_out # ================================================================ # For non-interactive invocations, call main() and", "a pending queue A function for the C application code to encode an", "Compute all necessary byte-widths for transmission and C structs # Each of the", "in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of 1,2,4 or", "as appropriate, if width <= 64 bits, uint8_t [..] 
if wider A 'state'", "pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() #", "(\"Computing all necessary byte-widths for packet formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes", "]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' :", "pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage: \" + argv", "queue A function for the C application code to encode an already queued", "add attributes 'width_bytes' and 'dimension' # and we add struct attribute 'size_bytes' for", "each field has type: BSV: Bit #(w) where w is the specified bit-width", "[] size_bytes = 0 for f in struct_spec_in ['fields']: field_name = f ['field_name']", "queues and communication 'credits' for each struct type, Functions for C application code", "within a struct. It is ok for a field-width to be 0 (e.g.,", "source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two are lists", "} ]} Struct names should be globally unique. Field names should be unique", "each type of send-struct into a pending queue Functions for C application code", "spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module", "(spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This is", "in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs),", "and spec_filename.py are in the current dir. # Study importlib examples where there", "a pending queue Functions for C application code to dequeue each type of", "or uint64_t, as appropriate, if width <= 64 bits, uint8_t [..] 
if wider", "code to encode an already queued send-struct into a bytevec ready for transmission", "fields are represented in C as uint8_t [N] # have dimension N and", "'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure for different parts", "'{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for transmission and C structs #", "struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================ #", "bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64", "spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: # Warning: # This dynamic import", "elif (width_bits <= 64): width_bytes = 8 else: width_bytes = 1 dimension =", "dimension 1 and width_bytes of 1,2,4 or 8 # Larger fields are represented", "struct spec function # In struct_spec_in, each field spec has attributes 'field_name' and", "and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs =", "of which has the following form: { 'struct_name': \"Foo\", 'fields' : [ {", "for each struct, where each field has type: BSV: Bit #(w) where w", "(argv)) return 0 spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename", "for transmission A function for the C application code to decode a received", "value # as the exit code. 
if __name__ == '__main__': sys.exit (main (sys.argv))", "= 0 elif (width_bits <= 8): width_bytes = 1 elif (width_bits <= 16):", "code to dequeue each type of receive-struct from a pending queue A function", "C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This is a struct spec", "spec -> struct spec function # In struct_spec_in, each field spec has attributes", "1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0 for f in", "of 'finding' from a path etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except:", "this Python executable and spec_filename.py are in the current dir. # Study importlib", "for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' :", "s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits' : len", "code to decode a received bytevec into a queued receive-struct ''' # ================================================================", "structs # Each of the 'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write", "(width_bits + 7) // 8 field_out = {'field_name' : field_name, 'width_bits' : width_bits,", "# Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension", "structure for different parts of a packet: BSV to C max_BSV_to_C_struct_bytes = max", "4 elif (width_bits <= 64): width_bytes = 8 else: width_bytes = 1 dimension", "7) // 8 field_out = {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes,", "# Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) #", "structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet", "struct type, Functions for C application code to enqueue each type of send-struct", "variables: 
C_to_BSV_structs BSV_to_C_structs package_name The first two are lists of 'struct specs', each", "width_bytes of 1,2,4 or 8 # Larger fields are represented in C as", "# have dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = []", "uint8_t [N] # have dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out", "(1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name:", "'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes", "different parts of a packet: C to BSV max_C_to_BSV_struct_bytes = max ([ s", "width } ]} Struct names should be globally unique. Field names should be", "enqueue each type of send-struct into a pending queue Functions for C application", "# ================================================================ # This is a struct spec -> struct spec function #", "importlib import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp =", "sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename", "import of the spec_filename spec file is fragile (only works if both #", "struct_spec_in, each field spec has attributes 'field_name' and 'width_bits' # In struct_spec_out, we", "(c) 2020 <NAME> # See README for details # ================================================================ import sys import", "a struct. It is ok for a field-width to be 0 (e.g., unused", "in C as uint8_t [N] # have dimension N and width_bytes 1 def", "in the current dir. 
# Study importlib examples where there is some notion", "attributes 'width_bytes' and 'dimension' # and we add struct attribute 'size_bytes' for total", "spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: #", "from a pending queue A function for the C application code to encode", "function for the C application code to encode an already queued send-struct into", "{'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out)", "[compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in", "from a path etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR:", "is fragile (only works if both # this Python executable and spec_filename.py are", "appropriate, if width <= 64 bits, uint8_t [..] if wider A 'state' struct", "]} Struct names should be globally unique. Field names should be unique within", "the current dir. # Study importlib examples where there is some notion of", "= { 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload'", "'struct_name': \"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width }, ...", "# This dynamic import of the spec_filename spec file is fragile (only works", "import * from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text", "(e.g., unused 'user' field in an AXI channel). 
Generates three output files: package_name.bsv", "dynamic import of the spec_filename spec file is fragile (only works if both", "= max ([ s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = {", "return struct_spec_out # ================================================================ # For non-interactive invocations, call main() and use its", "C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of 1,2,4 or 8", "from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================", "BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes,", "In struct_spec_out, we add attributes 'width_bytes' and 'dimension' # and we add struct", "are lists of 'struct specs', each of which has the following form: {", "16): width_bytes = 2 elif (width_bits <= 32): width_bytes = 4 elif (width_bits", "// 8 field_out = {'field_name' : field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension'", "a field-width to be 0 (e.g., unused 'user' field in an AXI channel).", "containing queues and communication 'credits' for each struct type, Functions for C application", "application code to decode a received bytevec into a queued receive-struct ''' #", "total # of bytes # Fields <= 64b wide, fit in C scalars", "'payload' : max_C_to_BSV_struct_bytes } # Data structure for different parts of a packet:", "into a queued receive-struct ''' # ================================================================ def main (argv = None): if", "for a field-width to be 0 (e.g., unused 'user' field in an AXI", "for total # of bytes # Fields <= 64b wide, fit in C", "# ================================================================ def mkHelp_text (argv): return \"Usage: \" + argv [0] + \"", "= 2 elif (width_bits <= 32): width_bytes = 4 elif (width_bits <= 64):", 
"<NAME> # See README for details # ================================================================ import sys import os import", "import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter()", "'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes", "-> struct spec function # In struct_spec_in, each field spec has attributes 'field_name'", "attribute 'size_bytes' for total # of bytes # Fields <= 64b wide, fit", "Each of the 'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all", "the C application code to encode an already queued send-struct into a bytevec", "two are lists of 'struct specs', each of which has the following form:", "C application code to enqueue each type of send-struct into a pending queue", "C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes", "(\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write", "for C application code to dequeue each type of receive-struct from a pending", "width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' :", "main() and use its return value # as the exit code. if __name__", ".h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0", "has attributes 'field_name' and 'width_bits' # In struct_spec_out, we add attributes 'width_bytes' and", "Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two are", "unique. Field names should be unique within a struct. 
It is ok for", "receive-struct ''' # ================================================================ def main (argv = None): if ((len (argv) !=", "'width_bits' # In struct_spec_out, we add attributes 'width_bytes' and 'dimension' # and we", "def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0 for f in struct_spec_in", "= 1 elif (width_bits <= 16): width_bytes = 2 elif (width_bits <= 32):", "an AXI channel). Generates three output files: package_name.bsv package_name.h package_name.c The C/BSV code", "code contains: Struct defs for each struct, where each field has type: BSV:", "path etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to", "for f in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits = f ['width_bits']", "packet formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs]", "names should be globally unique. Field names should be unique within a struct.", "Copyright (c) 2020 <NAME> # See README for details # ================================================================ import sys", "field in an AXI channel). Generates three output files: package_name.bsv package_name.h package_name.c The", "Warning: # This dynamic import of the spec_filename spec file is fragile (only", "C application code to decode a received bytevec into a queued receive-struct '''", ": [ { 'field_name' : 'fieldfoo', 'width_bits': width }, ... { 'field_name' :", "into a pending queue Functions for C application code to dequeue each type", "len (C_to_BSV_structs), 'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv", "BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data structure for different", "some notion of 'finding' from a path etc. 
spec = importlib.import_module (spec_filename) #", "s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data", "= 0 for f in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits =", "['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits'", "field-width to be 0 (e.g., unused 'user' field in an AXI channel). Generates", "(argv = None): if ((len (argv) != 2) or (argv [1] == \"-h\")", "# Warning: # This dynamic import of the spec_filename spec file is fragile", "bytevec into a queued receive-struct ''' # ================================================================ def main (argv = None):", "C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C (spec_filename, package_name,", "[1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: # Warning: # This", "if both # this Python executable and spec_filename.py are in the current dir.", "packet: BSV to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in", "max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs,", "and width_bytes of 1,2,4 or 8 # Larger fields are represented in C", "BSV_to_C_packet_bytes) return 0 # ================================================================ # This is a struct spec -> struct", "C/BSV code contains: Struct defs for each struct, where each field has type:", "#!/usr/bin/python3 -B # Copyright (c) 2020 <NAME> # See README for details #", "field has type: BSV: Bit #(w) where w is the specified bit-width C:", "2 elif (width_bits <= 32): width_bytes = 4 elif (width_bits <= 64): width_bytes", ": max_C_to_BSV_struct_bytes } # Data structure for different parts of a packet: BSV", "elif (width_bits <= 32): width_bytes = 4 elif (width_bits <= 64): width_bytes =", "0 dimension = 1; if 
(width_bits == 0): width_bytes = 0 elif (width_bits", "bits, uint8_t [..] if wider A 'state' struct containing queues and communication 'credits'", "and 'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet formats and C structs.\\n\")", "and we add struct attribute 'size_bytes' for total # of bytes # Fields", "Generates three output files: package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct defs", "(\"Package name: '{:s}'\\n\".format (package_name)) # Compute all necessary byte-widths for transmission and C", "* dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes}", "sys import os import stat import importlib import pprint from Gen_Bytevec_Mux_BSV import *", "to decode a received bytevec into a queued receive-struct ''' # ================================================================ def", "be 0 (e.g., unused 'user' field in an AXI channel). Generates three output", "spec has attributes 'field_name' and 'width_bits' # In struct_spec_out, we add attributes 'width_bytes'", "attributes 'field_name' and 'width_bits' # In struct_spec_out, we add attributes 'width_bytes' and 'dimension'", ": 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes }", "for each struct type, Functions for C application code to enqueue each type", "(width_bits <= 32): width_bytes = 4 elif (width_bits <= 64): width_bytes = 8", "file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name sys.stdout.write (\"Package name: '{:s}'\\n\".format (package_name)) #", "spec_filename [:-3] try: # Warning: # This dynamic import of the spec_filename spec", "works if both # this Python executable and spec_filename.py are in the current", "{ 'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct names should be globally", "defs for each struct, where each field has type: BSV: Bit #(w) where", "if wider A 
'state' struct containing queues and communication 'credits' for each struct", "of send-struct into a pending queue Functions for C application code to dequeue", "and communication 'credits' for each struct type, Functions for C application code to", "!= 2) or (argv [1] == \"-h\") or (argv [1] == \"--help\")): sys.stdout.write", "Generate the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate", "three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two are lists of 'struct specs',", "# Generate .h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes)", "for the C application code to encode an already queued send-struct into a", "pending queue Functions for C application code to dequeue each type of receive-struct", "and 'width_bits' # In struct_spec_out, we add attributes 'width_bytes' and 'dimension' # and", "\"Usage: \" + argv [0] + \" <spec_file.py>\" + ''' <spec_file.py> should be", "to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename))", "'size_bytes' : size_bytes} return struct_spec_out # ================================================================ # For non-interactive invocations, call main()", "== \"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename", "has type: BSV: Bit #(w) where w is the specified bit-width C: uint8_t,", "'dimension' # and we add struct attribute 'size_bytes' for total # of bytes", "files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ #", "['width_bits'] width_bytes = 0 dimension = 1; if (width_bits == 0): width_bytes =", "necessary byte-widths for transmission and C 
structs # Each of the 'field' structs", "<= 64): width_bytes = 8 else: width_bytes = 1 dimension = (width_bits +", "struct containing queues and communication 'credits' for each struct type, Functions for C", "sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file", "width_bytes = 1 elif (width_bits <= 16): width_bytes = 2 elif (width_bits <=", "is some notion of 'finding' from a path etc. spec = importlib.import_module (spec_filename)", "= importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format", "= 4 elif (width_bits <= 64): width_bytes = 8 else: width_bytes = 1", "Generate .h and .c files Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return", "+= width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes'", "os import stat import importlib import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C", "byte-widths for packet formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s", "{ 'packet_len' : 1, 'num_credits' : len (C_to_BSV_structs), 'channel_id' : 1, 'payload' :", "2) or (argv [1] == \"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text", "dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out, 'size_bytes' : size_bytes} return", "1; if (width_bits == 0): width_bytes = 0 elif (width_bits <= 8): width_bytes", "+ \" <spec_file.py>\" + ''' <spec_file.py> should be a Python source file defining", "<= 8): width_bytes = 1 elif (width_bits <= 16): width_bytes = 2 elif", "A function for the C application code to decode a received bytevec into", "and 'dimension' # and we add struct attribute 'size_bytes' for total # of", "AXI channel). 
Generates three output files: package_name.bsv package_name.h package_name.c The C/BSV code contains:", "to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs ])", "['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes = { 'packet_len' : 1, 'num_credits'", "'field_name' : 'fieldfoo', 'width_bits': width }, ... { 'field_name' : 'fieldfoo', 'width_bits': width", "0 for f in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits = f", "spec file is fragile (only works if both # this Python executable and", "(field_out) size_bytes += width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' :", "\"-h\") or (argv [1] == \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename =", "\"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv [1] if spec_filename.endswith (\".py\"):", "structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s)", "application code to enqueue each type of send-struct into a pending queue Functions", "C application code to encode an already queued send-struct into a bytevec ready", "... { 'field_name' : 'fieldfoo', 'width_bits': width } ]} Struct names should be", "import stat import importlib import pprint from Gen_Bytevec_Mux_BSV import * from Gen_Bytevec_Mux_C import", "import os import stat import importlib import pprint from Gen_Bytevec_Mux_BSV import * from", "are represented in C as uint8_t [N] # have dimension N and width_bytes", "a packet: BSV to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s", "1 dimension = (width_bits + 7) // 8 field_out = {'field_name' : field_name,", "C to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs", "globally unique. Field names should be unique within a struct. 
It is ok", "struct_spec_out, we add attributes 'width_bytes' and 'dimension' # and we add struct attribute", "64): width_bytes = 8 else: width_bytes = 1 dimension = (width_bits + 7)", "README for details # ================================================================ import sys import os import stat import importlib", "A function for the C application code to encode an already queued send-struct", "struct_spec_out # ================================================================ # For non-interactive invocations, call main() and use its return", "import * pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage: \"", "= { 'packet_len' : 1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload'", "dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'],", "{ 'struct_name': \"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width },", "C structs # Each of the 'field' structs extends with 'width_bytes' and 'dimension'", "form: { 'struct_name': \"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width", "BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes", "return 0 # ================================================================ # This is a struct spec -> struct spec", "''' <spec_file.py> should be a Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs", "uint32_t or uint64_t, as appropriate, if width <= 64 bits, uint8_t [..] if", "package_name.c The C/BSV code contains: Struct defs for each struct, where each field", "current dir. # Study importlib examples where there is some notion of 'finding'", "# Data structure for different parts of a packet: BSV to C max_BSV_to_C_struct_bytes", "<= 64 bits, uint8_t [..] 
if wider A 'state' struct containing queues and", "sys.stdout.write (\"Computing all necessary byte-widths for packet formats and C structs.\\n\") C_to_BSV_structs =", "# Data structure for different parts of a packet: C to BSV max_C_to_BSV_struct_bytes", "BSV_to_C_structs package_name The first two are lists of 'struct specs', each of which", "1, 'num_credits' : len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } #", "(width_bits <= 16): width_bytes = 2 elif (width_bits <= 32): width_bytes = 4", "def main (argv = None): if ((len (argv) != 2) or (argv [1]", "dimension N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes =", "executable and spec_filename.py are in the current dir. # Study importlib examples where", "function # In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits' #", "have dimension 1 and width_bytes of 1,2,4 or 8 # Larger fields are", "Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1", "== \"--help\")): sys.stdout.write (mkHelp_text (argv)) return 0 spec_filename = argv [1] if spec_filename.endswith", "# Copyright (c) 2020 <NAME> # See README for details # ================================================================ import", "for s in spec.BSV_to_C_structs] # Data structure for different parts of a packet:", "all necessary byte-widths for transmission and C structs # Each of the 'field'", "struct, where each field has type: BSV: Bit #(w) where w is the", "examples where there is some notion of 'finding' from a path etc. spec", "# Each of the 'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing", "file is fragile (only works if both # this Python executable and spec_filename.py", "dir. 
# Study importlib examples where there is some notion of 'finding' from", "(\".py\"): spec_filename = spec_filename [:-3] try: # Warning: # This dynamic import of", ": fields_out, 'size_bytes' : size_bytes} return struct_spec_out # ================================================================ # For non-interactive invocations,", "8): width_bytes = 1 elif (width_bits <= 16): width_bytes = 2 elif (width_bits", "['fields']: field_name = f ['field_name'] width_bits = f ['width_bits'] width_bytes = 0 dimension", "use its return value # as the exit code. if __name__ == '__main__':", "names should be unique within a struct. It is ok for a field-width", "(s) for s in spec.BSV_to_C_structs] # Data structure for different parts of a", "struct. It is ok for a field-width to be 0 (e.g., unused 'user'", "channel). Generates three output files: package_name.bsv package_name.h package_name.c The C/BSV code contains: Struct", "(spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h and .c files Gen_C", "# ================================================================ import sys import os import stat import importlib import pprint from", "receive-struct from a pending queue A function for the C application code to", "'finding' from a path etc. spec = importlib.import_module (spec_filename) # (\"type_specs\") except: sys.stdout.write", "byte-widths for transmission and C structs # Each of the 'field' structs extends", "which has the following form: { 'struct_name': \"Foo\", 'fields' : [ { 'field_name'", "queue Functions for C application code to dequeue each type of receive-struct from", "fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields'", "there is some notion of 'finding' from a path etc. 
spec = importlib.import_module", "contains: Struct defs for each struct, where each field has type: BSV: Bit", "struct_spec_in ['fields']: field_name = f ['field_name'] width_bits = f ['width_bits'] width_bytes = 0", "a bytevec ready for transmission A function for the C application code to", "of a packet: C to BSV max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for", "dequeue each type of receive-struct from a pending queue A function for the", "# of bytes # Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t)", "= 0 dimension = 1; if (width_bits == 0): width_bytes = 0 elif", "# Study importlib examples where there is some notion of 'finding' from a", "the .bsv file Gen_BSV (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) # Generate .h", "be a Python source file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first", "# this Python executable and spec_filename.py are in the current dir. # Study", "(s) for s in spec.C_to_BSV_structs] BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs]", "necessary byte-widths for packet formats and C structs.\\n\") C_to_BSV_structs = [compute_width_bytes (s) for", "(argv): return \"Usage: \" + argv [0] + \" <spec_file.py>\" + ''' <spec_file.py>", "should be unique within a struct. 
It is ok for a field-width to", "ready for transmission A function for the C application code to decode a", "width_bytes = 1 dimension = (width_bits + 7) // 8 field_out = {'field_name'", "where each field has type: BSV: Bit #(w) where w is the specified", ": len (BSV_to_C_structs), 'channel_id' : 1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure", "# ================================================================ # For non-interactive invocations, call main() and use its return value", "is a struct spec -> struct spec function # In struct_spec_in, each field", "type of send-struct into a pending queue Functions for C application code to", "each of which has the following form: { 'struct_name': \"Foo\", 'fields' : [", "bytevec ready for transmission A function for the C application code to decode", "f in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits = f ['width_bits'] width_bytes", "its return value # as the exit code. if __name__ == '__main__': sys.exit", ": dimension} fields_out.append (field_out) size_bytes += width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in", "field_name = f ['field_name'] width_bits = f ['width_bits'] width_bytes = 0 dimension =", "for s in C_to_BSV_structs ]) C_to_BSV_packet_bytes = { 'packet_len' : 1, 'num_credits' :", "((len (argv) != 2) or (argv [1] == \"-h\") or (argv [1] ==", "= argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: # Warning:", "uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64 bits, uint8_t [..]", "width_bytes = 2 elif (width_bits <= 32): width_bytes = 4 elif (width_bits <=", "structure for different parts of a packet: C to BSV max_C_to_BSV_struct_bytes = max", "================================================================ # This is a struct spec -> struct spec function # In", "scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have dimension 1 and width_bytes of 1,2,4 or 8 #", "This is a struct spec -> struct spec 
function # In struct_spec_in, each", "# (\"type_specs\") except: sys.stdout.write (\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1)", "are in the current dir. # Study importlib examples where there is some", "decode a received bytevec into a queued receive-struct ''' # ================================================================ def main", "# Compute all necessary byte-widths for transmission and C structs # Each of", "call main() and use its return value # as the exit code. if", "# This is a struct spec -> struct spec function # In struct_spec_in,", "already queued send-struct into a bytevec ready for transmission A function for the", "'channel_id' : 1, 'payload' : max_BSV_to_C_struct_bytes } # Generate the .bsv file Gen_BSV", "pending queue A function for the C application code to encode an already", "= f ['field_name'] width_bits = f ['width_bits'] width_bytes = 0 dimension = 1;", "argv [0] + \" <spec_file.py>\" + ''' <spec_file.py> should be a Python source", "(\"ERROR: unable to import module '{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported:", "(width_bits == 0): width_bytes = 0 elif (width_bits <= 8): width_bytes = 1", "invocations, call main() and use its return value # as the exit code.", "'credits' for each struct type, Functions for C application code to enqueue each", "[:-3] try: # Warning: # This dynamic import of the spec_filename spec file", "bytes # Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t) # have", "uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64 bits, uint8_t", "file defining three variables: C_to_BSV_structs BSV_to_C_structs package_name The first two are lists of", "Larger fields are represented in C as uint8_t [N] # have dimension N", "'dimension' sys.stdout.write (\"Computing all necessary byte-widths for packet formats and C structs.\\n\") C_to_BSV_structs", "64 bits, uint8_t [..] 
if wider A 'state' struct containing queues and communication", "C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This is a struct", "BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This is a struct spec ->", "size_bytes += width_bytes * dimension struct_spec_out = {'struct_name': struct_spec_in ['struct_name'], 'fields' : fields_out,", "argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try: # Warning: #", "N and width_bytes 1 def compute_width_bytes (struct_spec_in): fields_out = [] size_bytes = 0", "0): width_bytes = 0 elif (width_bits <= 8): width_bytes = 1 elif (width_bits", "struct attribute 'size_bytes' for total # of bytes # Fields <= 64b wide,", "Gen_C (spec_filename, package_name, C_to_BSV_structs, C_to_BSV_packet_bytes, BSV_to_C_structs, BSV_to_C_packet_bytes) return 0 # ================================================================ # This", "transmission and C structs # Each of the 'field' structs extends with 'width_bytes'", "represented in C as uint8_t [N] # have dimension N and width_bytes 1", "BSV to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs", "of the 'field' structs extends with 'width_bytes' and 'dimension' sys.stdout.write (\"Computing all necessary", "1, 'payload' : max_C_to_BSV_struct_bytes } # Data structure for different parts of a", "following form: { 'struct_name': \"Foo\", 'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits':", "= pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv): return \"Usage: \" + argv [0]", "[0] + \" <spec_file.py>\" + ''' <spec_file.py> should be a Python source file", "parts of a packet: BSV to C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes']", "and use its return value # as the exit code. 
if __name__ ==", "specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <=", "* from Gen_Bytevec_Mux_C import * pp = pprint.PrettyPrinter() # ================================================================ def mkHelp_text (argv):", "For non-interactive invocations, call main() and use its return value # as the", "It is ok for a field-width to be 0 (e.g., unused 'user' field", "size_bytes = 0 for f in struct_spec_in ['fields']: field_name = f ['field_name'] width_bits", "0 spec_filename = argv [1] if spec_filename.endswith (\".py\"): spec_filename = spec_filename [:-3] try:", "def mkHelp_text (argv): return \"Usage: \" + argv [0] + \" <spec_file.py>\" +", "# Larger fields are represented in C as uint8_t [N] # have dimension", "'{:s}'\\n\".format (spec_filename)) sys.exit (1) sys.stdout.write (\"Spec file imported: '{:s}'\\n\".format (spec_filename)) package_name = spec.package_name", "wider A 'state' struct containing queues and communication 'credits' for each struct type,", "the C application code to decode a received bytevec into a queued receive-struct", "spec_filename spec file is fragile (only works if both # this Python executable", "= (width_bits + 7) // 8 field_out = {'field_name' : field_name, 'width_bits' :", "'fieldfoo', 'width_bits': width } ]} Struct names should be globally unique. Field names", "is the specified bit-width C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if", "C max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs ]) BSV_to_C_packet_bytes", "import sys import os import stat import importlib import pprint from Gen_Bytevec_Mux_BSV import", "= [compute_width_bytes (s) for s in spec.BSV_to_C_structs] # Data structure for different parts", ": field_name, 'width_bits' : width_bits, 'width_bytes': width_bytes, 'dimension' : dimension} fields_out.append (field_out) size_bytes" ]
[ "class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self,", "LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>'", "for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" + str(ytypCounts[ytyp])) print(\"----------------------------------------------\") print(\"total:\\t\\t\" + str(totalCount))", "from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class", "ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and not", "+ \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for", "str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp])", "continue f = open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression = '<Item type=\"CEntityDef\">'", "open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>'", "import LodLevel from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps:", "total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\"", "+ LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*</Item>' for match in", "not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts", "totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) +", "= \"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and", "natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content", "= {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for", "self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems", "common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str,", "import os import re from natsort import natsorted from common.ymap.LodLevel import LodLevel from", "\"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"):", "num print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\"", "filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename),", "natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])): num =", "countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str):", "num = 
self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\" + str(num))", "import re from natsort import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import", "self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)):", "\":\") for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" +", "inputDir def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__),", "from natsort import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem from", "+ \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" + str(ytypCounts[ytyp])) print(\"----------------------------------------------\")", "str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir def run(self):", "natsort import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser", "in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\"", "self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\"))", "\"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue", "not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName 
not", "= '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:' +", "not tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps: self.countProps[ytypName] = {} if", "if ytypName not in self.countProps: self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]:", "def __init__(self, inputDir: str): self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps = {}", "not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue", "common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]]", "expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:'", "\\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?'", "'(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName = match.group(1).lower() if", "def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\",", "# if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"):", "filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression", "or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression =", "dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir", "filename), 'r') content = f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' +", "if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount =", "+ \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp", "self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] +=", "\" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp +", "+ str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" +", "readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in", "and not 
tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps: self.countProps[ytypName] = {}", "tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps: self.countProps[ytypName] = {} if archetypeName", "archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0", "= f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' +", "LodLevel from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str,", "content = f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?'", "tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName not in", "+= 1 totalCount = 0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp]", "in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts =", "totalCount = 0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0", "print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp", "\\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in", "<reponame>Larcius/gta5-modder-utils import os import re from natsort import natsorted from common.ymap.LodLevel import LodLevel", "+ \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\", "in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])): num", "= 0 print(ytyp + \":\") for prop in 
natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp]", "if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content =", "int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir", "ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop]", "print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for", "prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop +", "processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f =", "in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r')", "inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir def", "YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps =", "+ \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \"", "0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp +", "= 0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp", "content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName", "natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem 
from common.ytyp.YtypParser import YtypParser", "self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts = {} for", "1 totalCount = 0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] =", "ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\")", "+ \":\") for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\"", "f = open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression = '<Item type=\"CEntityDef\">' +", "LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for match in re.finditer(expression,", "ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir def run(self): self.readYtypItems()", "match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" #", "= open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression = '<Item type=\"CEntityDef\">' + \\", "ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\")", "{} for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop", "ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp]", "print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num", "self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts = {}", "for ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop in", "common.ymap.LodLevel import LodLevel from 
common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter:", "+ \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\"", "\\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" +", "= {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1", "not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps: self.countProps[ytypName]", "for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop", "ytypName not in self.countProps: self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName]", "dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir =", "tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if", "'\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD", "in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent", "+ prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp", "from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems:", "= {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def", "ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())):", "and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps:", "filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content = f.read() expression = '<Item", "not in self.countProps: self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] =", "f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' 
+ \\", "if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): #", "import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir:", "continue if ytypName not in self.countProps: self.countProps[ytypName] = {} if archetypeName not in", "match in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName =", "self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\",", "'<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD", "+ LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\", "self.countProps: self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName]", "self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts = {} for ytyp in natsorted(list(self.countProps.keys())):", "+= ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\")", "for match in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName", "prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp +", "ytyp in natsorted(list(self.countProps.keys())): ytypCounts[ytyp] = 0 print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])):", "__init__(self, inputDir: str): self.inputDir = inputDir def run(self): 
self.readYtypItems() self.countProps = {} self.processFiles()", "\":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\"", "+= num print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\")", "self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and", "in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\")", "YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def", "+ ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for match in re.finditer(expression, content):", "type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD +", "inputDir: str): self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def", "'(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' +", "def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f", "str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" + str(ytypCounts[ytyp]))", "0 print(ytyp + \":\") for prop in natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] +=", "natsorted(list(self.countProps[ytyp])): num = self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\" +", "\"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\")", "')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName", "YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not", "\"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or", "\"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not", "self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for", "dict[str, YtypItem] def __init__(self, inputDir: str): self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps", "\\ '(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>'", "= 0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts = {} for ytyp", "+ \\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName", "self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and", "print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" + str(ytypCounts[ytyp])) print(\"----------------------------------------------\") print(\"total:\\t\\t\" +", "+ str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" + ytyp + \" total:\\t\\t\" +", "YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str", "for filename in natsorted(os.listdir(self.inputDir)): if not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir,", "run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\",", "archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName =", "from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str,", "+ ytyp + \" total:\\t\\t\" + str(ytypCounts[ytyp]) + \"\\n\") print(\"\\nsummary:\") for ytyp in", "+ \\ '(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD +", "'r') content = f.read() expression = '<Item type=\"CEntityDef\">' + \\ '\\\\s*<archetypeName>([^<]+)</archetypeName>' + \\", "= inputDir def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self): self.ytypItems =", "# continue if ytypName not in self.countProps: self.countProps[ytypName] = {} if archetypeName not", "\"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for match", "ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\")", "os import re from natsort import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem", "= match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\"", "import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem from common.ytyp.YtypParser import", "in self.countProps: self.countProps[ytypName] = {} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0", "{} if archetypeName not in self.countProps[ytypName]: self.countProps[ytypName][archetypeName] = 0 self.countProps[ytypName][archetypeName] += 1 totalCount", "'\\\\s*<lodLevel>(?:' + LodLevel.HD + \"|\" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \\ '(?:\\\\s*<[^/].*>)*?' +", "= YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename in natsorted(os.listdir(self.inputDir)): if", "import YtypParser class StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem]", "+ \\ '(?:\\\\s*<[^/].*>)*?' 
+ \\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName =", "self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount +=", "tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName not in self.countProps: self.countProps[ytypName] =", "re from natsort import natsorted from common.ymap.LodLevel import LodLevel from common.ytyp.YtypItem import YtypItem", "def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self): for filename", "\"\\n\") print(\"\\nsummary:\") for ytyp in natsorted(list(ytypCounts.keys())): print(ytyp + \":\\t\\t\" + str(ytypCounts[ytyp])) print(\"----------------------------------------------\") print(\"total:\\t\\t\"", "\\ '(?:\\\\s*<[^/].*>)*?' + \\ '\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName = match.group(1).lower()", "StatisticsPrinter: countProps: dict[str, dict[str, int]] inputDir: str ytypItems: dict[str, YtypItem] def __init__(self, inputDir:", "re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else:", "and not tree.startswith(\"prop_tree_\") and not tree.startswith(\"prop_w_r_cedar_\") and not tree.startswith(\"test_tree_\"): # continue if ytypName", "not filename.endswith(\".ymap.xml\") or filename.endswith(\"_lod.ymap.xml\"): continue f = open(os.path.join(self.inputDir, filename), 'r') content = f.read()", "= self.countProps[ytyp][prop] ytypCounts[ytyp] += num print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount", "archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not", "if archetypeName in self.ytypItems: ytypName = self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if", 
"print(\"\\t\" + prop + \":\\t\\t\" + str(num)) totalCount += ytypCounts[ytyp] print(\"\\t----------------------------------------------\") print(\"\\t\" +", "{} self.processFiles() def readYtypItems(self): self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"resources\", \"ytyp\")) def processFiles(self):", "= self.ytypItems[archetypeName].parent else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\")", "else: ytypName = \"others\" # if not tree.startswith(\"prop_s_pine_\") and not tree.startswith(\"prop_tree_\") and not", "0 self.countProps[ytypName][archetypeName] += 1 totalCount = 0 ytypCounts = {} for ytyp in", "str): self.inputDir = inputDir def run(self): self.readYtypItems() self.countProps = {} self.processFiles() def readYtypItems(self):", "'\\\\s*</Item>' for match in re.finditer(expression, content): archetypeName = match.group(1).lower() if archetypeName in self.ytypItems:" ]
[ "<gh_stars>0 import os import numpy as np import scipy.io as sio def get_dataset(opt):", "import os import numpy as np import scipy.io as sio def get_dataset(opt): if", "as np import scipy.io as sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents", "sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels", "if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /=", "os import numpy as np import scipy.io as sio def get_dataset(opt): if opt.dataset", "mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise", "def get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32)", "import scipy.io as sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root,", "as sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data", "= sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat'))", "= mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else:", "mat_contents = 
sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s' % opt.dataset)", "get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data", "scipy.io as sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat'))", "data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset:", "mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root,", "sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data =", "opt.dataset == 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data))", "/= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s'", "sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s' % opt.dataset) return data,", "'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels =", "np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) 
labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s' %", "'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s' % opt.dataset) return data, labels", "== 'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents", "numpy as np import scipy.io as sio def get_dataset(opt): if opt.dataset == 'IP':", "import numpy as np import scipy.io as sio def get_dataset(opt): if opt.dataset ==", "= sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt'] else: raise NotImplementedError('dataset: %s' % opt.dataset) return", "'IP': mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat')) data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents =", "data = mat_contents['indian_pines_corrected'].astype(np.float32) data /= np.max(np.abs(data)) mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat')) labels = mat_contents['indian_pines_gt']", "np import scipy.io as sio def get_dataset(opt): if opt.dataset == 'IP': mat_contents =" ]
[ "# CallFlow Project Developers. See the top-level LICENSE file for details. # #", "self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result = self.run() def", "idx, func in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][", "Livermore National Security, LLC and other # CallFlow Project Developers. See the top-level", "pandas as pd class RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph self.df", "ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\":", "class RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df", "[] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in", "top-level LICENSE file for details. # # SPDX-License-Identifier: MIT import pandas as pd", "Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: MIT", "and other # CallFlow Project Developers. 
See the top-level LICENSE file for details.", "= name self.entry_funcs = {} self.result = self.run() def run(self): ret = []", "func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } ) ret_df =", "== func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } ) ret_df = pd.DataFrame(ret) return ret_df.to_json(orient=\"columns\")", "= state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result = self.run() def run(self):", "self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( { \"name\": func, \"time", "= self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append(", "2017-2020 Lawrence Livermore National Security, LLC and other # CallFlow Project Developers. See", "# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other # CallFlow Project", "].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(),", "\"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } ) ret_df = pd.DataFrame(ret)", "state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name =", "CallFlow Project Developers. See the top-level LICENSE file for details. 
# # SPDX-License-Identifier:", "SPDX-License-Identifier: MIT import pandas as pd class RankHistogram: def __init__(self, state, name): self.graph", "state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result = self.run() def run(self): ret", "Lawrence Livermore National Security, LLC and other # CallFlow Project Developers. See the", "Copyright 2017-2020 Lawrence Livermore National Security, LLC and other # CallFlow Project Developers.", "MIT import pandas as pd class RankHistogram: def __init__(self, state, name): self.graph =", "\"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] ==", "func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( { \"name\":", "= state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result =", "= [] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func", "= {} self.result = self.run() def run(self): ret = [] module = self.name.split(\"=\")[0]", "self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result", "{} self.result = self.run() def run(self): ret = [] module = self.name.split(\"=\")[0] func_in_module", "import pandas as pd class RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph", "self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } ) ret_df = pd.DataFrame(ret) return", "name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name", "self.result = self.run() def run(self): ret = [] module = self.name.split(\"=\")[0] func_in_module =", "= self.run() def run(self): ret = [] module = self.name.split(\"=\")[0] 
func_in_module = self.df[self.df.module", "def __init__(self, state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df", "name self.entry_funcs = {} self.result = self.run() def run(self): ret = [] module", "self.entry_funcs = {} self.result = self.run() def run(self): ret = [] module =", "== module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\":", "Security, LLC and other # CallFlow Project Developers. See the top-level LICENSE file", "# SPDX-License-Identifier: MIT import pandas as pd class RankHistogram: def __init__(self, state, name):", "for idx, func in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] ==", "other # CallFlow Project Developers. See the top-level LICENSE file for details. #", "LICENSE file for details. # # SPDX-License-Identifier: MIT import pandas as pd class", "func in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time", "(inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] ==", "state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs = {}", "self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } )", "= self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( { \"name\": func,", "National Security, LLC and other # CallFlow Project Developers. 
See the top-level LICENSE", "run(self): ret = [] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for", "\"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\":", "# # SPDX-License-Identifier: MIT import pandas as pd class RankHistogram: def __init__(self, state,", "= state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs =", "func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\":", "Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: MIT import", "self.name = name self.entry_funcs = {} self.result = self.run() def run(self): ret =", "RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df =", "enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(),", "module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module):", "{ \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"]", "for details. 
# # SPDX-License-Identifier: MIT import pandas as pd class RankHistogram: def", "== func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(),", "func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(),", "as pd class RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph self.df =", "the top-level LICENSE file for details. # # SPDX-License-Identifier: MIT import pandas as", "self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( {", "(inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"]", "LLC and other # CallFlow Project Developers. See the top-level LICENSE file for", "details. # # SPDX-License-Identifier: MIT import pandas as pd class RankHistogram: def __init__(self,", "__init__(self, state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name", "file for details. 
# # SPDX-License-Identifier: MIT import pandas as pd class RankHistogram:", "self.graph = state.new_gf.graph self.df = state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs", "== func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), } ) ret_df", "\"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"] == func][\"dataset\"].tolist(), }", "in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"] == func][ \"time (inc)\"", "self.run() def run(self): ret = [] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module ==", "pd class RankHistogram: def __init__(self, state, name): self.graph = state.new_gf.graph self.df = state.new_gf.df", "self.df.loc[self.df[\"name\"] == func][ \"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] ==", "state.new_gf.df self.entire_df = state.new_entire_gf.df self.name = name self.entry_funcs = {} self.result = self.run()", "ret = [] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist() for idx,", "module][\"name\"].unique().tolist() for idx, func in enumerate(func_in_module): ret.append( { \"name\": func, \"time (inc)\": self.df.loc[self.df[\"name\"]", "\"time (inc)\" ].tolist(), \"time\": self.df.loc[self.df[\"name\"] == func][\"time\"].tolist(), \"rank\": self.df.loc[self.df[\"name\"] == func][\"rank\"].tolist(), \"dataset\": self.df.loc[self.df[\"name\"]", "def run(self): ret = [] module = self.name.split(\"=\")[0] func_in_module = self.df[self.df.module == module][\"name\"].unique().tolist()", "See the top-level LICENSE file for details. 
# # SPDX-License-Identifier: MIT import pandas" ]
[ "returns a map contents of all test cases. Parameters ---------- dir : str", "dict of contents of all test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files", "number = int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip()) file.close() return", "dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name]", "str directory of the files to load. file_name : str the name of", "all test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases", "file_name) test_cases_files = open(path, \"r\") test_cases = dict() for file_name in test_cases_files.readlines(): case_name", "test case files name to read. Returns ------- dict a dict of contents", "case def load_test_cases(dir, file_name): \"\"\" loads one test case from file. returns a", "test cases. Parameters ---------- dir : str directory of the files to load.", "test case from file. returns a map contents of all test cases. Parameters", "file. returns a map contents of all test cases. Parameters ---------- dir :", "that contains all test case files name to read. Returns ------- dict a", "\"\"\" reads one test case from file. returns contents of test case Parameters", "all test case files name to read. Returns ------- dict a dict of", "the test case file to read. Returns ------- list a list of contents", "in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name] = read_test_case(file_path) test_cases_files.close()", ": str the path of the test case file to read. Returns -------", "the test case. \"\"\" file = open(file_path, \"r\") number = int(file.readline().strip()) case =", "contents of the test case. \"\"\" file = open(file_path, \"r\") number = int(file.readline().strip())", "to read. 
Returns ------- list a list of contents of the test case.", "case files name to read. Returns ------- dict a dict of contents of", "open(path, \"r\") test_cases = dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path", "test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name] = read_test_case(file_path) test_cases_files.close() return", "test case. \"\"\" file = open(file_path, \"r\") number = int(file.readline().strip()) case = list()", "\"r\") number = int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip()) file.close()", "name to read. Returns ------- dict a dict of contents of all test", "case from file. returns a map contents of all test cases. Parameters ----------", "\"r\") test_cases = dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path =", "case Parameters ---------- file_path : str the path of the test case file", "load. file_name : str the name of the file that contains all test", "file_name : str the name of the file that contains all test case", "from file. returns contents of test case Parameters ---------- file_path : str the", "of the files to load. file_name : str the name of the file", "------- dict a dict of contents of all test cases. \"\"\" path =", "contents of all test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path,", "test_cases_files = open(path, \"r\") test_cases = dict() for file_name in test_cases_files.readlines(): case_name =", "read. Returns ------- list a list of contents of the test case. \"\"\"", "return case def load_test_cases(dir, file_name): \"\"\" loads one test case from file. returns", "import os def read_test_case(file_path): \"\"\" reads one test case from file. 
returns contents", "returns contents of test case Parameters ---------- file_path : str the path of", "in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\" loads one test", "for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name] =", "file.close() return case def load_test_cases(dir, file_name): \"\"\" loads one test case from file.", "= dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip())", "contents of all test cases. Parameters ---------- dir : str directory of the", "of all test cases. Parameters ---------- dir : str directory of the files", "for i in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\" loads", "the path of the test case file to read. Returns ------- list a", "of the file that contains all test case files name to read. Returns", "of the test case file to read. Returns ------- list a list of", ": str directory of the files to load. file_name : str the name", "str the name of the file that contains all test case files name", "of contents of all test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files =", "path of the test case file to read. Returns ------- list a list", "of all test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\")", "os def read_test_case(file_path): \"\"\" reads one test case from file. returns contents of", "of contents of the test case. \"\"\" file = open(file_path, \"r\") number =", "i in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\" loads one", "one test case from file. 
returns contents of test case Parameters ---------- file_path", "= open(path, \"r\") test_cases = dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0]", "file that contains all test case files name to read. Returns ------- dict", "test case file to read. Returns ------- list a list of contents of", "read. Returns ------- dict a dict of contents of all test cases. \"\"\"", "range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\" loads one test case", "Parameters ---------- file_path : str the path of the test case file to", "one test case from file. returns a map contents of all test cases.", "Returns ------- dict a dict of contents of all test cases. \"\"\" path", "file_path : str the path of the test case file to read. Returns", "test cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases =", "def read_test_case(file_path): \"\"\" reads one test case from file. returns contents of test", "a dict of contents of all test cases. \"\"\" path = os.path.join(dir, file_name)", "read_test_case(file_path): \"\"\" reads one test case from file. returns contents of test case", "case = list() for i in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir,", "---------- file_path : str the path of the test case file to read.", "Returns ------- list a list of contents of the test case. \"\"\" file", "file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name] = read_test_case(file_path)", "------- list a list of contents of the test case. \"\"\" file =", "file = open(file_path, \"r\") number = int(file.readline().strip()) case = list() for i in", "to load. file_name : str the name of the file that contains all", "---------- dir : str directory of the files to load. 
file_name : str", "= open(file_path, \"r\") number = int(file.readline().strip()) case = list() for i in range(number):", "files name to read. Returns ------- dict a dict of contents of all", "list of contents of the test case. \"\"\" file = open(file_path, \"r\") number", "case. \"\"\" file = open(file_path, \"r\") number = int(file.readline().strip()) case = list() for", "contents of test case Parameters ---------- file_path : str the path of the", "to read. Returns ------- dict a dict of contents of all test cases.", "load_test_cases(dir, file_name): \"\"\" loads one test case from file. returns a map contents", "os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases = dict() for file_name in test_cases_files.readlines():", "case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\" loads one test case from", "= list() for i in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name):", "= os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases = dict() for file_name in", "str the path of the test case file to read. Returns ------- list", "file to read. Returns ------- list a list of contents of the test", "the files to load. file_name : str the name of the file that", "file. returns contents of test case Parameters ---------- file_path : str the path", "cases. Parameters ---------- dir : str directory of the files to load. file_name", "list() for i in range(number): case.append(file.readline().strip()) file.close() return case def load_test_cases(dir, file_name): \"\"\"", "test_cases = dict() for file_name in test_cases_files.readlines(): case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir,", "all test cases. 
Parameters ---------- dir : str directory of the files to", "= int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip()) file.close() return case", "map contents of all test cases. Parameters ---------- dir : str directory of", "from file. returns a map contents of all test cases. Parameters ---------- dir", "dict a dict of contents of all test cases. \"\"\" path = os.path.join(dir,", "def load_test_cases(dir, file_name): \"\"\" loads one test case from file. returns a map", "Parameters ---------- dir : str directory of the files to load. file_name :", "contains all test case files name to read. Returns ------- dict a dict", "file_name): \"\"\" loads one test case from file. returns a map contents of", "name of the file that contains all test case files name to read.", "directory of the files to load. file_name : str the name of the", "case file to read. Returns ------- list a list of contents of the", "a list of contents of the test case. \"\"\" file = open(file_path, \"r\")", "\"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases = dict() for", "list a list of contents of the test case. \"\"\" file = open(file_path,", "\"\"\" file = open(file_path, \"r\") number = int(file.readline().strip()) case = list() for i", "of the test case. \"\"\" file = open(file_path, \"r\") number = int(file.readline().strip()) case", "reads one test case from file. returns contents of test case Parameters ----------", "files to load. 
file_name : str the name of the file that contains", "test case Parameters ---------- file_path : str the path of the test case", "of test case Parameters ---------- file_path : str the path of the test", "int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip()) file.close() return case def", "the name of the file that contains all test case files name to", "open(file_path, \"r\") number = int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip())", ": str the name of the file that contains all test case files", "dir : str directory of the files to load. file_name : str the", "cases. \"\"\" path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases = dict()", "path = os.path.join(dir, file_name) test_cases_files = open(path, \"r\") test_cases = dict() for file_name", "case from file. returns contents of test case Parameters ---------- file_path : str", "test case from file. returns contents of test case Parameters ---------- file_path :", "\"\"\" loads one test case from file. returns a map contents of all", "a map contents of all test cases. Parameters ---------- dir : str directory", "loads one test case from file. returns a map contents of all test", "the file that contains all test case files name to read. Returns -------", "case_name = file_name.strip().split(\".\")[0] file_path = os.path.join(dir, file_name.strip()) test_cases[case_name] = read_test_case(file_path) test_cases_files.close() return test_cases" ]
[ "# \"error_msg\": \"bad request\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=400) #", "q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather all the extra information", "{ # \"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def", "= {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {} return", "{ # \"error_msg\": \"bad request\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=400)", "All rights reserved. # # University of Colorado, Boulder # http://cires.colorado.edu/ # #", "\"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context = { # \"error_msg\": \"bad", "from django.shortcuts import render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): #", "reserved. # # University of Colorado, Boulder # http://cires.colorado.edu/ # # This project", "Colorado, Boulder # http://cires.colorado.edu/ # # This project is distributed according to the", "context = { # \"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context,", "django.shortcuts import render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): # print", "the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from", "return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context = { #", "rights reserved. # # University of Colorado, Boulder # http://cires.colorado.edu/ # # This", "gather all the extra information required by the template... 
context = { \"error_msg\":", "def q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather all the extra", "Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather", "context=context, status=403) def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def", "CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved. # # University", "error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather all the extra information required", "\"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request):", "status=400) # # # def q_403(request): # context = { # \"error_msg\": \"permission_denied\",", "= { # \"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=403)", "MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire import q_logger def", "the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire import q_logger", "render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {} return render(request, \"questionnaire/q_500.html\", context=context,", "the extra information required by the template... 
context = { \"error_msg\": error_msg, \"status_code\":", "request\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def", "return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {} return render(request, \"questionnaire/q_500.html\",", "# This project is distributed according to the terms of the MIT license", "from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) #", "render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context,", "required by the template... context = { \"error_msg\": error_msg, \"status_code\": status_code, } return", "distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts", "q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context =", "# \"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request):", "# def q_400(request): # context = { # \"error_msg\": \"bad request\", # }", "return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\",", "# } # return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request):", "template... 
context = { \"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context,", "\"error_msg\": \"bad request\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=400) # #", "of Colorado, Boulder # http://cires.colorado.edu/ # # This project is distributed according to", "is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from", "by the template... context = { \"error_msg\": error_msg, \"status_code\": status_code, } return render(request,", "q_logger.error(error_msg) # gather all the extra information required by the template... context =", "\"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context", "status_code=400): # print error_msg... q_logger.error(error_msg) # gather all the extra information required by", "# # This project is distributed according to the terms of the MIT", "information required by the template... context = { \"error_msg\": error_msg, \"status_code\": status_code, }", "terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire", "# print error_msg... q_logger.error(error_msg) # gather all the extra information required by the", "} return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context = {", "#################### from django.shortcuts import render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400):", "the template... 
context = { \"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\",", "render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context = { # \"error_msg\":", "# # # def q_403(request): # context = { # \"error_msg\": \"permission_denied\", #", "\"bad request\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # #", "of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire import", "Boulder # http://cires.colorado.edu/ # # This project is distributed according to the terms", "q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather all the", "\"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404)", "{ \"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def", "# Copyright (c) 2017 ES-DOC. All rights reserved. # # University of Colorado,", "extra information required by the template... 
context = { \"error_msg\": error_msg, \"status_code\": status_code,", "def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context", "def q_400(request): # context = { # \"error_msg\": \"bad request\", # } #", "context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {}", "= { # \"error_msg\": \"bad request\", # } # return render(request, \"questionnaire/q_error.html\", context=context,", "# context = { # \"error_msg\": \"bad request\", # } # return render(request,", "# return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {} return render(request,", "} # return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request): #", "<filename>Q/questionnaire/views/views_errors.py #################### # ES-DOC CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights", "project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. ####################", "license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire import q_logger def q_error(request,", "\"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context =", "2017 ES-DOC. All rights reserved. 
# # University of Colorado, Boulder # http://cires.colorado.edu/", "\"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context", "# University of Colorado, Boulder # http://cires.colorado.edu/ # # This project is distributed", "University of Colorado, Boulder # http://cires.colorado.edu/ # # This project is distributed according", "http://cires.colorado.edu/ # # This project is distributed according to the terms of the", "def q_403(request): # context = { # \"error_msg\": \"permission_denied\", # } # return", "# } # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {}", "# gather all the extra information required by the template... context = {", "context=context, status=status_code) # def q_400(request): # context = { # \"error_msg\": \"bad request\",", "ES-DOC. All rights reserved. # # University of Colorado, Boulder # http://cires.colorado.edu/ #", "status=status_code) # def q_400(request): # context = { # \"error_msg\": \"bad request\", #", "\"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {} return render(request, \"questionnaire/q_500.html\", context=context, status=404)", "{} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request): context = {} return render(request,", "(c) 2017 ES-DOC. All rights reserved. # # University of Colorado, Boulder #", "# # def q_403(request): # context = { # \"error_msg\": \"permission_denied\", # }", "context = { # \"error_msg\": \"bad request\", # } # return render(request, \"questionnaire/q_error.html\",", "\"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request): # context = { #", "Copyright (c) 2017 ES-DOC. All rights reserved. 
# # University of Colorado, Boulder", "import render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg...", "ES-DOC CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved. # #", "return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request): # context =", "to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render", "q_400(request): # context = { # \"error_msg\": \"bad request\", # } # return", "This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].", "q_403(request): # context = { # \"error_msg\": \"permission_denied\", # } # return render(request,", "print error_msg... q_logger.error(error_msg) # gather all the extra information required by the template...", "import q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg... q_logger.error(error_msg) # gather all", "context=context, status=400) # # # def q_403(request): # context = { # \"error_msg\":", "# ES-DOC CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved. #", "according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import", "# context = { # \"error_msg\": \"permission_denied\", # } # return render(request, \"questionnaire/q_error.html\",", "all the extra information required by the template... context = { \"error_msg\": error_msg,", "render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\", status_code=400): # print error_msg... 
q_logger.error(error_msg)", "} # return render(request, \"questionnaire/q_error.html\", context=context, status=403) def q_404(request): context = {} return", "# def q_403(request): # context = { # \"error_msg\": \"permission_denied\", # } #", "# # University of Colorado, Boulder # http://cires.colorado.edu/ # # This project is", "error_msg... q_logger.error(error_msg) # gather all the extra information required by the template... context", "render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request): # context = {", "#################### # ES-DOC CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved.", "[http://www.opensource.org/licenses/MIT]. #################### from django.shortcuts import render from Q.questionnaire import q_logger def q_error(request, error_msg=\"\",", "status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): # context =", "Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved. # # University of", "# http://cires.colorado.edu/ # # This project is distributed according to the terms of", "status=403) def q_404(request): context = {} return render(request, \"questionnaire/q_404.html\", context=context, status=404) def q_500(request):", "error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) # def q_400(request): #", "= { \"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code) #", "# return render(request, \"questionnaire/q_error.html\", context=context, status=400) # # # def q_403(request): # context", "context = { \"error_msg\": error_msg, \"status_code\": status_code, } return render(request, \"questionnaire/q_error.html\", context=context, status=status_code)" ]
[ "@author: oekenta \"\"\" from ctypes import c_ulonglong, c_double, cdll, byref import numpy as", "import numpy as np class lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol", "loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): # LUSOL input", "= 0 self.pivot = 0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 =", "= c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq =", "= cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): # LUSOL input parameters self.rank", "self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem =", "\"\"\" from ctypes import c_ulonglong, c_double, cdll, byref import numpy as np class", "c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A = np.array([[1,2],[3,4]]) l = lusol(A) l.loadlibrary()", "c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1]", "on Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\" from ctypes import c_ulonglong,", "12:18:32 2022 @author: oekenta \"\"\" from ctypes import c_ulonglong, c_double, cdll, byref import", "-*- coding: utf-8 -*- \"\"\" Created on Tue Feb 8 12:18:32 2022 @author:", "def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): # LUSOL", "0 self.Ltol1 = 0 self.Ltol2 = 0 self.small = 0 self.Utol1 = 0", "# LUSOL input parameters self.rank = 0 self.maxcol = 0 self.pivot = 0", "2022 @author: oekenta \"\"\" from ctypes import c_ulonglong, c_double, cdll, byref import numpy", "\"\"\" Created on Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\" from ctypes", "= 0 self.Uspace = 0 self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs", 
"self.rank = 0 self.maxcol = 0 self.pivot = 0 self.keepLU = 0 self.Ltol1", "__init__(self, A : np.array ): # LUSOL input parameters self.rank = 0 self.maxcol", "self.Ltol2 = 0 self.small = 0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace", "parameters self.rank = 0 self.maxcol = 0 self.pivot = 0 self.keepLU = 0", "self.Utol2 = 0 self.Uspace = 0 self.dens1 = 0 self.dens2 = 0 #LU1FAC", "self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def", "Feb 8 12:18:32 2022 @author: oekenta \"\"\" from ctypes import c_ulonglong, c_double, cdll,", "= 0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 = 0 self.small =", "0 self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem", "self.maxcol = 0 self.pivot = 0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2", "-*- \"\"\" Created on Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\" from", "np.array ): # LUSOL input parameters self.rank = 0 self.maxcol = 0 self.pivot", "self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap", "= 0 self.Ltol1 = 0 self.Ltol2 = 0 self.small = 0 self.Utol1 =", "np class lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Tue Feb 8 12:18:32", "= c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap =", "Created on Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\" from ctypes import", "0 self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n", "= 0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace = 0 self.dens1 =", "self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n =", "8 12:18:32 2022 @author: oekenta \"\"\" 
from ctypes import c_ulonglong, c_double, cdll, byref", "Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000)", "input parameters self.rank = 0 self.maxcol = 0 self.pivot = 0 self.keepLU =", "liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A :", "self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A =", "oekenta \"\"\" from ctypes import c_ulonglong, c_double, cdll, byref import numpy as np", "= 0 self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0])", "self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A = np.array([[1,2],[3,4]]) l =", "= 0 self.Ltol2 = 0 self.small = 0 self.Utol1 = 0 self.Utol2 =", "= 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A))", "self.Utol1 = 0 self.Utol2 = 0 self.Uspace = 0 self.dens1 = 0 self.dens2", "= 0 self.Utol2 = 0 self.Uspace = 0 self.dens1 = 0 self.dens2 =", "0 self.pivot = 0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 = 0", "self.small = 0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace = 0 self.dens1", "0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ):", "0 self.maxcol = 0 self.pivot = 0 self.keepLU = 0 self.Ltol1 = 0", "self.pivot = 0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 = 0 self.small", "utf-8 -*- \"\"\" Created on Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\"", "import c_ulonglong, c_double, cdll, byref import numpy as np class lusol: liblusol =", "0 self.Utol2 = 0 self.Uspace = 0 self.dens1 = 0 self.dens2 = 0", "c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def 
factorize(): A", "from ctypes import c_ulonglong, c_double, cdll, byref import numpy as np class lusol:", "@classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): #", "c_ulonglong, c_double, cdll, byref import numpy as np class lusol: liblusol = 0", "0 self.Ltol2 = 0 self.small = 0 self.Utol1 = 0 self.Utol2 = 0", "= 0 self.dens2 = 0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1])", "Tue Feb 8 12:18:32 2022 @author: oekenta \"\"\" from ctypes import c_ulonglong, c_double,", "0 self.Uspace = 0 self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs self.m", "LUSOL input parameters self.rank = 0 self.maxcol = 0 self.pivot = 0 self.keepLU", "= 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array", "A : np.array ): # LUSOL input parameters self.rank = 0 self.maxcol =", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Feb 8 12:18:32 2022", "class lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self,", "0 #LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena", "= c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize():", "0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace = 0 self.dens1 = 0", "c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A = np.array([[1,2],[3,4]]) l", "ctypes import c_ulonglong, c_double, cdll, byref import numpy as np class lusol: liblusol", "self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0] self.aq", "= c_ulonglong(10000) self.ap = 
c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A = np.array([[1,2],[3,4]])", "as np class lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so')", "): # LUSOL input parameters self.rank = 0 self.maxcol = 0 self.pivot =", "cdll, byref import numpy as np class lusol: liblusol = 0 @classmethod def", "0 self.small = 0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace = 0", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Tue Feb 8", "numpy as np class lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol =", "#LU1FAC Inputs self.m = c_ulonglong(A.shape[0]) self.n = c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena =", "cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): # LUSOL input parameters self.rank =", "0 self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 = 0 self.small = 0", "= 0 self.small = 0 self.Utol1 = 0 self.Utol2 = 0 self.Uspace =", "coding: utf-8 -*- \"\"\" Created on Tue Feb 8 12:18:32 2022 @author: oekenta", ": np.array ): # LUSOL input parameters self.rank = 0 self.maxcol = 0", "= 0 self.maxcol = 0 self.pivot = 0 self.keepLU = 0 self.Ltol1 =", "self.Ltol1 = 0 self.Ltol2 = 0 self.small = 0 self.Utol1 = 0 self.Utol2", "cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A : np.array ): # LUSOL input parameters", "self.keepLU = 0 self.Ltol1 = 0 self.Ltol2 = 0 self.small = 0 self.Utol1", "lusol: liblusol = 0 @classmethod def loadlibrary(cls): cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so') def __init__(self, A", "self.Uspace = 0 self.dens1 = 0 self.dens2 = 0 #LU1FAC Inputs self.m =", "c_double, cdll, byref import numpy as np class lusol: liblusol = 0 @classmethod", "def __init__(self, A : np.array ): # LUSOL input parameters self.rank = 0", "c_ulonglong(A.shape[0]) self.n = 
c_ulonglong(A.shape[1]) self.nelem = c_ulonglong(np.count_nonzero(A)) self.lena = c_ulonglong(10000) self.ap = c_ulonglong*A.shape[0]", "= c_ulonglong*A.shape[0] self.aq = c_ulonglong*A.shape[1] def factorize(): A = np.array([[1,2],[3,4]]) l = lusol(A)", "byref import numpy as np class lusol: liblusol = 0 @classmethod def loadlibrary(cls):" ]
[ "status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator, check, instance_critical): check.check(instance_critical)", "status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator,", "2018 # All rights reserved # Licensed under Simplified BSD License (see LICENSE)", "BSD License (see LICENSE) from os.path import isfile def test_ok(aggregator, check, instance_ok): assert", "aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def", "check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning)", "# All rights reserved # Licensed under Simplified BSD License (see LICENSE) from", "aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator, check, instance_critical):", "import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator,", "Licensed under 
Simplified BSD License (see LICENSE) from os.path import isfile def test_ok(aggregator,", "rights reserved # Licensed under Simplified BSD License (see LICENSE) from os.path import", "# Licensed under Simplified BSD License (see LICENSE) from os.path import isfile def", "License (see LICENSE) from os.path import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file'])", "from os.path import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK)", "check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not", "check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator, check,", "assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING)", "Inc. 
2018 # All rights reserved # Licensed under Simplified BSD License (see", "(see LICENSE) from os.path import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok)", "isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator,", "under Simplified BSD License (see LICENSE) from os.path import isfile def test_ok(aggregator, check,", "test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning):", "# (C) Datadog, Inc. 
2018 # All rights reserved # Licensed under Simplified", "def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present):", "test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert", "isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check,", "reserved # Licensed under Simplified BSD License (see LICENSE) from os.path import isfile", "LICENSE) from os.path import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required',", "instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required',", "Datadog, Inc. 
2018 # All rights reserved # Licensed under Simplified BSD License", "assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present)", "def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator, check, instance_critical): check.check(instance_critical) aggregator.assert_service_check('system.reboot_required',", "os.path import isfile def test_ok(aggregator, check, instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def", "def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check,", "Simplified BSD License (see LICENSE) from os.path import isfile def test_ok(aggregator, check, instance_ok):", "isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required',", "instance_ok): assert isfile(instance_ok['created_at_file']) check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file'])", "test_warning(aggregator, check, instance_warning): 
check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def test_critical(aggregator, check, instance_critical): check.check(instance_critical) aggregator.assert_service_check('system.reboot_required', status=check.CRITICAL)", "check.check(instance_ok) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_not_present_ok(aggregator, check, instance_not_present): assert not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK)", "(C) Datadog, Inc. 2018 # All rights reserved # Licensed under Simplified BSD", "All rights reserved # Licensed under Simplified BSD License (see LICENSE) from os.path", "not isfile(instance_not_present['created_at_file']) check.check(instance_not_present) aggregator.assert_service_check('system.reboot_required', status=check.OK) def test_warning(aggregator, check, instance_warning): check.check(instance_warning) aggregator.assert_service_check('system.reboot_required', status=check.WARNING) def" ]
[ "address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if not peerList: return b''", "isinstance(peerList, Peer): peerList = [peerList] if not peerList: return b'' retval = b''", "Peer): peerList = [peerList] if not peerList: return b'' retval = b'' for", "= [peerList] if not peerList: return b'' retval = b'' for i in", "peerList: return b'' retval = b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload", "this node payload += struct.pack('>q', 1) payload += encodeHost(peer.host) # remote port payload", "payload += encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port) retval += CreatePacket('addr',", "import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList =", "if not peerList: return b'' retval = b'' for i in range(0, len(peerList),", "struct.pack('>q', 1) payload += encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port) retval", "network.constants import MAX_ADDR_COUNT from network.node import Peer from protocol import CreatePacket, encodeHost def", "[peerList] if not peerList: return b'' retval = b'' for i in range(0,", "struct import addresses from network.constants import MAX_ADDR_COUNT from network.node import Peer from protocol", "range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp in", "+= struct.pack('>I', stream) # service bit flags offered by this node payload +=", "import Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if", "\"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if not peerList: return", "command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if not peerList: return b'' retval", "len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for 
stream, peer, timestamp in peerList[i:i", "flags offered by this node payload += struct.pack('>q', 1) payload += encodeHost(peer.host) #", "bit flags offered by this node payload += struct.pack('>q', 1) payload += encodeHost(peer.host)", "b'' retval = b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i", "i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer,", "for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload +=", "import struct import addresses from network.constants import MAX_ADDR_COUNT from network.node import Peer from", "node payload += struct.pack('>q', 1) payload += encodeHost(peer.host) # remote port payload +=", "# 64-bit time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service", "command packets \"\"\" import struct import addresses from network.constants import MAX_ADDR_COUNT from network.node", "in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp", "addresses from network.constants import MAX_ADDR_COUNT from network.node import Peer from protocol import CreatePacket,", "from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer):", "CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList]", "import MAX_ADDR_COUNT from network.node import Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList):", "MAX_ADDR_COUNT from network.node import Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create", "\"\"\" import struct import addresses from network.constants import MAX_ADDR_COUNT from network.node import Peer", "+= struct.pack('>q', 1) payload += 
encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port)", "for i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream,", "import addresses from network.constants import MAX_ADDR_COUNT from network.node import Peer from protocol import", "protocol command packets \"\"\" import struct import addresses from network.constants import MAX_ADDR_COUNT from", "return b'' retval = b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload =", "struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service bit flags offered by this", "+ MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream)", "network.node import Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\"", "+ MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time", "MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload", "encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port) retval += CreatePacket('addr', payload) return", "bitmessage protocol command packets \"\"\" import struct import addresses from network.constants import MAX_ADDR_COUNT", "struct.pack('>I', stream) # service bit flags offered by this node payload += struct.pack('>q',", "= b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT]))", "peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp)", "+= struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service bit flags offered by", "64-bit time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service bit", "by this node payload += struct.pack('>q', 1) payload += encodeHost(peer.host) # remote port", "# remote port 
payload += struct.pack('>H', peer.port) retval += CreatePacket('addr', payload) return retval", "protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList", "Create bitmessage protocol command packets \"\"\" import struct import addresses from network.constants import", "\"\"\" Create bitmessage protocol command packets \"\"\" import struct import addresses from network.constants", "payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:", "stream) # service bit flags offered by this node payload += struct.pack('>q', 1)", "not peerList: return b'' retval = b'' for i in range(0, len(peerList), MAX_ADDR_COUNT):", "if isinstance(peerList, Peer): peerList = [peerList] if not peerList: return b'' retval =", "assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if not peerList:", "from network.node import Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address", "in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp) payload +=", "retval = b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i +", "Peer from protocol import CreatePacket, encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList,", "from network.constants import MAX_ADDR_COUNT from network.node import Peer from protocol import CreatePacket, encodeHost", "offered by this node payload += struct.pack('>q', 1) payload += encodeHost(peer.host) # remote", "b'' for i in range(0, len(peerList), MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for", "MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) #", "1) payload += 
encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port) retval +=", "<gh_stars>1000+ \"\"\" Create bitmessage protocol command packets \"\"\" import struct import addresses from", "peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I',", "= addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: #", "addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit", "payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service bit flags offered", "payload += struct.pack('>I', stream) # service bit flags offered by this node payload", "# service bit flags offered by this node payload += struct.pack('>q', 1) payload", "time payload += struct.pack('>Q', timestamp) payload += struct.pack('>I', stream) # service bit flags", "MAX_ADDR_COUNT): payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) for stream, peer, timestamp in peerList[i:i +", "service bit flags offered by this node payload += struct.pack('>q', 1) payload +=", "timestamp) payload += struct.pack('>I', stream) # service bit flags offered by this node", "encodeHost def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if", "def assemble_addr(peerList): \"\"\"Create address command\"\"\" if isinstance(peerList, Peer): peerList = [peerList] if not", "packets \"\"\" import struct import addresses from network.constants import MAX_ADDR_COUNT from network.node import", "peerList = [peerList] if not peerList: return b'' retval = b'' for i", "+= encodeHost(peer.host) # remote port payload += struct.pack('>H', peer.port) retval += CreatePacket('addr', payload)", "stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q',", "timestamp in peerList[i:i + 
MAX_ADDR_COUNT]: # 64-bit time payload += struct.pack('>Q', timestamp) payload", "payload += struct.pack('>q', 1) payload += encodeHost(peer.host) # remote port payload += struct.pack('>H'," ]
[ "Connect x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x", "trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18,", "BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size = 224 Trained ImageNet-1k", "**kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K #", "layer in self.layers: x,H,W = layer(x,H,W) x = self.norm(x) # [B, L, C]", "SW-MSA ? # cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size,", "= torch.flatten(x, 1) x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module):", "= 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12,", "= window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 #", "x = shifted_x # 移除Pad数据 if pad_r > 0 or pad_b > 0:", "attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]", "pad_b > 0: # 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x =", "1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) #", "embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model", "x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C =", "类似于Pooling下采样 if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample =", "= x x = self.norm1(x) # reshape feature map x = x.view(B, H,", "3, 1, 4) # 
[batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d)", "the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True,", "depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if", "Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import", "在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape #", "> 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x", "= 0 pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b =", "Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows,", "32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained", "self.downsample is not None: x = self.downsample(x, H, W) H, W = (H", "coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() #", "list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样", "1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH]", "def forward(self,x,mask=None): \"\"\" Args: x: input features with shape of (num_windows*B, Mh*Mw, C)", "drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input channels. 
num_heads (int):", "Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using", "of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows,", "optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm \"\"\"", "# permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3,", "i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16,", "* self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) #", "将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x", "x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input", "= dim self.depth = depth self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained", "dims=(1, 2)) else: x = shifted_x # 移除Pad数据 if pad_r > 0 or", "Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_", "H, W = (H + 1) // 2, (W + 1) // 2", "self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W = H, W # self.H =", "num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K", "Default: False. 
\"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth = depth self.window_size =", "= x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: ->", "2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2", "= self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1, 2)) # [B, C,", "memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth = depth self.window_size", "attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of input channels.", "F import torch.utils.checkpoint as checkpoint import numpy as np from typing import Optional", "= self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size,", "return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth", "1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None,", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8,", "x = self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1, 2)) # [B,", "model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model", "+ 1) // 2, (W + 1) // 2 # DownSample之后,H,W应该减半 return x,", "optional): Dropout ratio of output. 
Default: 0.0 \"\"\" # Mh: Windows Size Height", "One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True,", "Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn", "= (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size - H", "# reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N,", "Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import", "B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0]", "attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose:", "-> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4.,", "norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,", "W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ... self.num_features = int(embed_dim", "SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
qkv_bias (bool,", "for layer in self.layers: x,H,W = layer(x,H,W) x = self.norm(x) # [B, L,", "= num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size", "-> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7,", "patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size,", "*= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] #", "% 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i]", "or pad_b > 0: # 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x", "= 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12,", "int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4,", "relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] -", "mask is not None: nW = mask.shape[0] # SW-MSA 需要做attention Mask # mask:", "len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4 x", "norm_layer=norm_layer) for i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample is", "H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0", "把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN #", "drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): 
super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim", "- W % self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size)", "(nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer", "embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4 x 48", "torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices", "drop=drop) def forward(self, x, attn_mask): # feature map的Height & Width H, W =", "mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW,", "4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop", "to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth =", "downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def", "embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0.,", "0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :,", "ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2),", "blk in self.blocks: blk.H, blk.W = H, W # self.H = H, self.W", "self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim,", "mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) -", "DropPath, PatchEmbed from BasicModule import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size", "drop_path_rate, sum(depths))] # stochastic depth decay rule # bulid 
layers self.layers = nn.ModuleList()", "None, :] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw,", "for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1", "多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W) x = self.norm(x) # [B,", "Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices", "(float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout", "(int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding", "PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -", "nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim *", "* 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop)", "= 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7,", "W # self.H = H, self.W = W if not torch.jit.is_scripting() and self.use_checkpoint:", "# attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1,", "feature has wrong size\" # Skip Connect shortcut = x x = self.norm1(x)", "2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int", "Feature Map \"\"\" # 为SW-MSA计算Attention Mask. 
# 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size))", "窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows =", "return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for", "(e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if", "DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature", "norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H:", "* self.window_size Wp = int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask", "width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional):", "dim (int): Number of input channels. num_heads (int): Number of attention heads. window_size", "num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q =", "[batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q * self.scale attn = (q @", "Shift size for SW-MSA. 
mlp_ratio (float): Ratio of mlp hidden dim to embedding", "L, C = x.shape assert L == H * W, \"input feature has", "% self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size", "< self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool =", "attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk", "x, attn_mask) else: x = blk(x, attn_mask) if self.downsample is not None: x", "Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head,", "for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # bulid", "A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`", "H/16 x W/16 x 4C(stage3) ... self.num_features = int(embed_dim * 2 ** (self.num_layers", "* window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1", "for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2", "16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):", "= attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn", "= (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x)", "Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W =", "else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m,", "Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :,", "2, 18, 2), num_heads=(4, 8, 16, 
32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def", "stochastic depth decay rule # bulid layers self.layers = nn.ModuleList() for i_layer in", "height and width of the window. num_heads (int): Number of attention heads. qkv_bias", "21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128,", "drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input channels.", "W = (H + 1) // 2, (W + 1) // 2 #", "# Mw: Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim", "self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp,", "0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float],", "relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn", "Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size", "Height W: Feature Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. #", "= qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @:", "Drop Path dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic", "_ = x.shape # 是W-MSA 还是 SW-MSA ? # cyclic shift if self.shift_size", "torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw, 1] - [2, 1, Mh*Mw]", "Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)", "both of shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA", "q = q * self.scale attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d)", "optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at", "window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio", "not None: x = self.downsample(x, H, W) H, W = (H + 1)", "not None: nW = mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw,", "Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin", "Number of blocks. block数量 num_heads (int): Number of attention heads. window_size (int): Local", "= int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x,", "+ self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA)", "(H + 1) // 2, (W + 1) // 2 # DownSample之后,H,W应该减半 return", "import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import", "\"shift_size must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention", "self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 #", "norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,", "patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs)", "* mlp_ratio) self.mlp = Mlp(in_features=dim, 
hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): #", "forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage", "attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio", "移除Pad数据 if pad_r > 0 or pad_b > 0: # 把前面pad的数据移除掉 x =", "Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim", "if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None", ":].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip Connect x = shortcut +", "trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6,", "wrong size\" # Skip Connect shortcut = x x = self.norm1(x) # reshape", "= torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask = None", "pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size -", "[nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze:", "(int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to", "https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn import torch.nn.functional as F import", "2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,", "(W-MSA) module with relative position bias. 
It supports both of shifted and non-shifted", "Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ //", "224, 3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size,", "# 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1)", "dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key,", "self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size,", "self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp,", "if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch Merging", "(float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate.", "else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr =", "[Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)", "def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model =", "Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted", "input: (Bs, 224, 224, 3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed", "Dropout ratio of attention weight. 
Default: 0.0 proj_drop (float, optional): Dropout ratio of", "\"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU,", "self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)", "Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,", "self.W = W if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask)", "# x:[B, L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for", "heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float):", "2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] - 1", "Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size =", "C) x = self.proj(x) x = self.proj_drop(x) return x if __name__ == \"__main__\":", "Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows =", "[nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B,", "[Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to", "\"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model", "if mask is not None: nW = mask.shape[0] # SW-MSA 需要做attention Mask #", "num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1,", "attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of input channels. 
window_size (tuple[int]): The", "x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows", "non-shifted window. VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上", "# [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] # [2,", "# [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW,", "* 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative position index for each", "def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model =", "and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask) if", "use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm", "1, Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2,", "= downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系", "qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of", "model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model", "connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, 
attn_drop=0., proj_drop=0.):", "12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int =", "# scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0]", "in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer),", "= torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh,", "操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的", "forward(self,x,mask=None): \"\"\" Args: x: input features with shape of (num_windows*B, Mh*Mw, C) mask:", "H: Feature Map Height W: Feature Map Width x: Feature Map \"\"\" #", "这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch", "0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim =", "num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K", "// 2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin", "or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N,", "of input channels. window_size (tuple[int]): The height and width of the window. 
num_heads", "cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))", "= depth self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size", "Map \"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) *", "\"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm,", "https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn import torch.nn.functional", "# trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2,", "num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert", "x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W", "Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q *", "torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords,", "self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm #", "self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8,", "+= self.window_size[1] - 1 
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1", "h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt +=", "32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained", "[batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape:", "drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer.", "SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes,", "W) H, W = (H + 1) // 2, (W + 1) //", "Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional):", "(self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size - H %", "super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm", "Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv", "norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else", "Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3,", "原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window", "return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth", "torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # 
[2, Mh, Mw]", "> 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x", "Mh*Mw] q = q * self.scale attn = (q @ k.transpose(-2, -1)) #", "None: nW = mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw]", "as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import numpy as", "@ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x", "= window_size # [Mh, Mw] self.num_heads = num_heads head_dim = dim // num_heads", "混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1,", "size\" # Skip Connect shortcut = x x = self.norm1(x) # reshape feature", "window_size=window_size, shift_size=0 if (i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio,", "= 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7,", "Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\"", "qkv_bias (bool, optional): If True, add a learnable bias to query, key, value.", "head_dim = dim // num_heads # 每个head的dim self.scale = head_dim ** -0.5 #", "rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0", "(W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer:", "self.downsample(x, H, W) H, W = (H + 1) // 2, (W +", "x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ... 
self.num_features = int(embed_dim *", "x = blk(x, attn_mask) if self.downsample is not None: x = self.downsample(x, H,", "self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop =", "with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape of (num_windows,", "= torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw, 1] - [2, 1,", "SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes,", "3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw,", "注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size,", "# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q * self.scale", "attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x =", "drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1)", "`Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer", "None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W:", "channel B, L, C = x.shape assert L == H * W, \"input", "from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from BasicModule import", "# pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([", "(nn.Module, 
optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm", "1) # [2, Mh*Mw] # [2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords", "2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes:", "-> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]", "2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl", "to use checkpointing to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim =", "self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows,", "MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)", "W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input -> LayerNorm", "N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) # @:", "from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *=", "attn_drop (float, optional): Dropout ratio of attention weight. 
Default: 0.0 proj_drop (float, optional):", "1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn", "** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer,", "patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self,", "这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2),", "# SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows,", "as checkpoint import numpy as np from typing import Optional from BasicModule import", "1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] -", "[nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0)", "num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12,", "attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths)", "self.blocks: blk.H, blk.W = H, W # self.H = H, self.W = W", "mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op", "= SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),", "cyclic shift if 
self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))", "relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) #", "# [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1,", "[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule #", "range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer],", "+ 1) // 2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module):", "-> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12,", "- 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224, 3)", "torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x # 移除Pad数据 if pad_r", "of output. 
Default: 0.0 \"\"\" # Mh: Windows Size Height # Mw: Windows", "is not None: x = self.downsample(x, H, W) H, W = (H +", "return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth", "is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W):", ":, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:,", "numpy as np from typing import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed", "# 输出特征矩阵的Channels (C) # H/4 x W/4 x 48 -> H/4 x W/4", "None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): #", "2)) else: shifted_x = x attn_mask = None # 窗口划分 # Windows Partition", "return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained", "Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw, 1] -", "Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW =", "float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数", "attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads,", "N, C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] #", "int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4,", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), 
num_heads=(4,", "num_heads # 每个head的dim self.scale = head_dim ** -0.5 # scale # 定义一个parameter table来存放relative", "= mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view:", "**kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2,", "depth self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size //", "self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1, 2)) # [B, C, 1]", "[nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw]", "\"\"\" Args: dim (int): Number of input channels. depth (int): Number of blocks.", "module with relative position bias. It supports both of shifted and non-shifted window.", "# Patch Merging Layer 类似于Pooling下采样 if downsample is not None: self.downsample = downsample(dim=dim,", "[batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]", "mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number", "\"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3,", "self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8,", "0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn =", "qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3,", 
"0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer.", "@ k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] ->", "# [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size", "ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2),", "# feature map的Height & Width H, W = self.H, self.W # Batch, length,", "else: attn = self.softmax(attn) attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads,", "Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W / self.window_size)) *", "x = torch.flatten(x, 1) x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class", "transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw,", "mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size,", "dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致,", "head_dim ** -0.5 # scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter(", "self.H, self.W # Batch, length, channel B, L, C = x.shape assert L", "self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW,", "patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到", "+ self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window", "in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else 
None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth", "= self.avgpool(x.transpose(1, 2)) # [B, C, 1] x = torch.flatten(x, 1) x =", "Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path", "Dropout ratio of output. Default: 0.0 \"\"\" # Mh: Windows Size Height #", "coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw]", "他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature Map Width x: Feature", "def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in", "embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model", "self.avgpool(x.transpose(1, 2)) # [B, C, 1] x = torch.flatten(x, 1) x = self.head(x)", "# LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim * mlp_ratio)", "C] x = self.avgpool(x.transpose(1, 2)) # [B, C, 1] x = torch.flatten(x, 1)", ":H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip Connect x =", "Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C)", "> 0 or pad_b > 0: # 把前面pad的数据移除掉 x = x[:, :H, :W,", "= F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape #", "16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): #", "= (self.window_size - H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp,", "nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear)", "size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
qkv_bias (bool,", "self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括:", "layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,", "mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i", "table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2", "21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192,", "in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 # Shift", "has wrong size\" # Skip Connect shortcut = x x = self.norm1(x) #", "attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1,", "drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of input", "mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature", "C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v", "3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans,", "else nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim", "reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads,", "dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = 
nn.Linear(dim, dim) self.proj_drop =", "!= 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw,", "mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)", "# [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape # qkv(): -> [batch_size*num_windows,", "# author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍;", "[1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size,", "nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\"", "# 区域编码 cnt = 0 for h in h_slices: for w in w_slices:", "swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3,", "nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim *", "Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads self.window_size =", "depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model def", "if pad_r > 0 or pad_b > 0: # 把前面pad的数据移除掉 x = x[:,", "int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4,", "optional): Attention dropout rate. 
Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth", "MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0.,", "Default: 0.0 \"\"\" # Mh: Windows Size Height # Mw: Windows Size Width", "window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 *", "(int): Number of blocks. block数量 num_heads (int): Number of attention heads. window_size (int):", "window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size", "是W-MSA 还是 SW-MSA ? # cyclic shift if self.shift_size > 0: shifted_x =", "shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0~window_size\" # LN1", "if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x", "x = x.view(B,H*W,C) # FFN # 两个Skip Connect x = shortcut + self.drop_path(x)", "num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841,", "num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained", "bulid layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的", "= nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0)", "2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int", "trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18,", 
"torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # bulid layers self.layers =", "#当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer)", "super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads self.window_size = window_size self.shift_size =", "Skip Connect shortcut = x x = self.norm1(x) # reshape feature map x", "self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else", "(int): Number of input channels. window_size (tuple[int]): The height and width of the", "learnable bias to query, key, value. Default: True attn_drop (float, optional): Dropout ratio", "shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程", "相对位置索引获得方法 # get pair-wise relative position index for each token inside the window", "num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs):", "# [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn +", "Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer: Hierarchical", "self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr = [x.item() for", "Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path =", "= mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] #", "# [2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None]", "num_classes=num_classes, **kwargs) return model 
\"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\"", "float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W)", "bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1]", "return x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA) module with", "为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp =", "patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7,", "# trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2,", "= blk(x, attn_mask) if self.downsample is not None: x = self.downsample(x, H, W)", "-> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x =", "self.scale = head_dim ** -0.5 # scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table", "optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0", "_, Hp, Wp, _ = x.shape # 是W-MSA 还是 SW-MSA ? 
# cyclic", "self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and", "ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2),", "layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers", "utf-8 -*- # author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical", "self.scale attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B:", "# 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\"", "使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of", "depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module", "= relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0]", "= [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule", "optional): Normalization layer. Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads =", "num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()", "window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift", "(i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool", "patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs)", "= nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None):", "= cnt cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size)", "W/16 x 4C(stage3) ... self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))", "rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float,", "scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] -", "depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\"", "use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if", "-self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码", "depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth", "nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size # [Mh, Mw]", "FFN # 两个Skip Connect x = shortcut 
+ self.drop_path(x) x = x +", "drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional):", "Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn import torch.nn.functional as", "口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim,", "2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def", "Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from BasicModule", "= BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,", "a learnable bias to query, key, value. Default: True attn_drop (float, optional): Dropout", "num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C)", "for blk in self.blocks: blk.H, blk.W = H, W # self.H = H,", "ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output.", "(int): Number of input channels. num_heads (int): Number of attention heads. window_size (int):", "SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2,", "h, w, :] = cnt cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows", "# 两个Skip Connect x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x)))", "= checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask) if self.downsample is not", "relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj =", "for SW-MSA. 
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias", "of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If", "input channels. window_size (tuple[int]): The height and width of the window. num_heads (int):", "elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C]", "[batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x =", "self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features with shape of", "< self.window_size, \"shift_size must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head", "and non-shifted window. VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift", "0.0 attn_drop (float, optional): Attention dropout rate. 
Default: 0.0 drop_path (float, optional): Stochastic", "# 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer],", "shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size", "(0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention", "= norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,", "SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes,", "# 相对位置索引获得方法 # get pair-wise relative position index for each token inside the", "2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\"", "* self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] #", "x attn_mask = None # 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B,", "2)) else: x = shifted_x # 移除Pad数据 if pad_r > 0 or pad_b", "torch.nn.functional as F import torch.utils.checkpoint as checkpoint import numpy as np from typing", "mlp hidden dim to embedding dim. 
qkv_bias (bool, optional): If True, add a", "Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K", "ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K #", "Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of :", "Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args:", "# Skip Connect shortcut = x x = self.norm1(x) # reshape feature map", "% self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp,", "super(BasicLayer, self).__init__() self.dim = dim self.depth = depth self.window_size = window_size self.use_checkpoint =", "两个Skip Connect x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return", "nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif", "mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of", "# -*- coding: utf-8 -*- # author: <NAME> # email: <EMAIL> \"\"\" Swin", "= nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax =", "% self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _", "Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW,", "isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias 
is not None: nn.init.constant_(m.bias,", "SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0,", "Wp, 1), device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0,", "LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True,", "#! /usr/bin/enc python # -*- coding: utf-8 -*- # author: <NAME> # email:", "7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000,", "coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1)", "2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int =", "dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义:", "mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size", "2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes:", "- 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写", "nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))", "* window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 #", "** -0.5 # scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2", "[batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] 
x = (attn", "LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads,", "drop_path > 0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP Layer", "else: x = shifted_x # 移除Pad数据 if pad_r > 0 or pad_b >", "= x.view(B,H*W,C) # FFN # 两个Skip Connect x = shortcut + self.drop_path(x) x", "18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes:", "# email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows", "[Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim", "= SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24),", "self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows,", "PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) #", "Default: None use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.", "(int): Window size. shift_size (int): Shift size for SW-MSA. 
mlp_ratio (float): Ratio of", "- 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1]", "x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows,", "else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature", "# Mh: Windows Size Height # Mw: Windows Size Width # nH: num_heads", "pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0])", "2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative position index for each token", "# 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio,", "SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的", "LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size,", "qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply", "= PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate)", "(nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads", "relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],", "Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C]", "drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of input channels. depth", "self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr", "cnt cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) #", "定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) *", "= 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): #", "= relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv", "Windows Size Height # Mw: Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__()", "# [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: ->", "Mh*Mw] # [2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :,", "self.H = H, self.W = W if not torch.jit.is_scripting() and self.use_checkpoint: x =", "dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of input", "nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1)", "= 
DropPath(drop_path) if drop_path > 0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim)", "6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm,", "ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2),", "window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return", "self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW,", "= (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B: # relative_position_bias_table.view:", "hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable", "= Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature map的Height &", "= torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x # 移除Pad数据 if", "slice(-self.shift_size, None)) # 区域编码 cnt = 0 for h in h_slices: for w", "cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h,", "# [B, C, 1] x = torch.flatten(x, 1) x = self.head(x) # 分类头", "If True, add a learnable bias to query, key, value. Default: True attn_drop", "act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input channels. 
num_heads (int): Number", "21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128,", "# # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW,", "relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index)", "= self.softmax(attn) attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]", "self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0)", "total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3,", "Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width", "6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\"", "attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) #", "in self.layers: x,H,W = layer(x,H,W) x = self.norm(x) # [B, L, C] x", "python # -*- coding: utf-8 -*- # author: <NAME> # email: <EMAIL> \"\"\"", "mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224, 3) # output: (e.g patch_size=4:", "window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int):", "[1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads, N,", "18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int", "window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), 
num_classes=num_classes, **kwargs) return", "attn_mask = None # 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh,", "2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],", "shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else:", "def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model =", "Merging Layer 类似于Pooling下采样 if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else:", "h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt", "0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask =", "w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 #", "\"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model", "torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask)", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3,", "class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA) module with relative position", "window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get", "nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features with shape of (num_windows*B, Mh*Mw,", 
"BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer", "x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W) x", "qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input", "/ self.window_size)) * self.window_size Wp = int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature", "= attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W): #", "W = self.H, self.W # Batch, length, channel B, L, C = x.shape", "L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in", "attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B: #", "# [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))", "[2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw,", "# Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1]", "num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0,", "4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose:", "trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18,", "+= 1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh,", "**kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, 
**kwargs): # trained ImageNet-22K", "W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8", "nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B,", "def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None,", "1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative", "Hp, Wp, _ = x.shape # 是W-MSA 还是 SW-MSA ? # cyclic shift", "Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)", "**kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K #", "add a learnable bias to query, key, value. Default: True attn_drop (float, optional):", "Mh*Mw, total_embed_dim] B_, N, C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3", "0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) #", "还是 SW-MSA ? # cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x,", "drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers =", "map的Height & Width H, W = self.H, self.W # Batch, length, channel B,", "+ relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] # SW-MSA 需要做attention", "# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw,", "x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t =", "Normalization layer. 
Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads", "self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1)", "2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer:", "\"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth = depth self.window_size = window_size self.use_checkpoint", "std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m,", "-*- coding: utf-8 -*- # author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer", "window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return", "分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def", "- 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index =", "self.dim = dim self.depth = depth self.window_size = window_size self.use_checkpoint = use_checkpoint #", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6,", "= shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size,", "\"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6,", "Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as", "pad_t = 0 pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b", "reshape feature map x = x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍", 
"dims=(1, 2)) else: shifted_x = x attn_mask = None # 窗口划分 # Windows", "torch.flatten(x, 1) x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\"", "not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\"", "0 pad_t = 0 pad_r = (self.window_size - W % self.window_size) % self.window_size", "embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth #", "self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask)", "self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn =", "depth decay rule # bulid layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers):", "Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape", "total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x", "It supports both of shifted and non-shifted window. VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General", "qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of input channels. 
window_size (tuple[int]):", "relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] +=", "x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x", "= dim // num_heads # 每个head的dim self.scale = head_dim ** -0.5 # scale", "48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2)", "self.W # Batch, length, channel B, L, C = x.shape assert L ==", "0 <= self.shift_size < self.window_size, \"shift_size must in 0~window_size\" # LN1 self.norm1 =", "num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True,", "-> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ...", "// nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N,", "8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):", "W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16", "nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0)", "depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth", "C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw,", "size. shift_size (int): Shift size for SW-MSA. 
mlp_ratio (float): Ratio of mlp hidden", "2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module):", "not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x):", "window_reverse \"\"\"SwinT window_size = 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def", "num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative position", "from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size = 224 Trained", "dim // num_heads # 每个head的dim self.scale = head_dim ** -0.5 # scale #", "import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from", "bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)", "email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head", "(bool): Whether to use checkpointing to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__()", "mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size)", "multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q * self.scale attn =", "the end of the layer. 
Default: None use_checkpoint (bool): Whether to use checkpointing", "if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x =", "if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x", "[batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]", "self.depth = depth self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size =", "Whether to use checkpointing to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim", "H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ... self.num_features", "of the layer. Default: None use_checkpoint (bool): Whether to use checkpointing to save", "-> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias", "k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH]", "(2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4,", "bias to query, key, value. Default: True drop (float, optional): Dropout rate. 
Default:", "blk.W = H, W # self.H = H, self.W = W if not", "forward(self, x, attn_mask): # feature map的Height & Width H, W = self.H, self.W", "(slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size,", "torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) #", "== 0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask", "reverse cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1,", "None: x = self.downsample(x, H, W) H, W = (H + 1) //", "patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs)", "__init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),", "** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224,", "**kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm =", "layer. Default: None use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default:", "[2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None] -", "pad_l = 0 pad_t = 0 pad_r = (self.window_size - W % self.window_size)", "# QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias =", "= dim self.window_size = window_size # [Mh, Mw] self.num_heads = num_heads head_dim =", "8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs):", "# stochastic depth decay rule # bulid layers self.layers = nn.ModuleList() for i_layer", "map x = x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l =", "import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import numpy as np from", "slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt", "不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop)", "norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim", "window. num_heads (int): Number of attention heads. 
qkv_bias (bool, optional): If True, add", "1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96,", "range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample is not None: self.downsample =", "/ self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1),", "patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm", "= layer(x,H,W) x = self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1, 2))", "Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample", "swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3,", "np from typing import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule", "* W, \"input feature has wrong size\" # Skip Connect shortcut = x", "H, W) H, W = (H + 1) // 2, (W + 1)", "0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional):", "Downsample layer at the end of the layer. Default: None use_checkpoint (bool): Whether", "downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of input channels. 
depth (int): Number", "0 pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4,", "act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature map的Height & Width H, W", "mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers", "embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query,", "1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224, 3) #", "# Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape # 是W-MSA 还是", "/ SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows", "= SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),", "window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return", "0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw]", "x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ... 
self.num_features =", "24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000,", "H, W # self.H = H, self.W = W if not torch.jit.is_scripting() and", "# MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,", "nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x)", "shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch", "def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number", "Mw] self.num_heads = num_heads head_dim = dim // num_heads # 每个head的dim self.scale =", "x 4C(stage3) ... self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio", "Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\"", "32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained", "embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model", "swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3,", "If True, add a learnable bias to query, key, value. 
Default: True drop", "6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs):", "coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten", "shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww,", "(i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop,", "= self.H, self.W # Batch, length, channel B, L, C = x.shape assert", "* self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。", "Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else", "SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature Map Width x:", "self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x =", "Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start", "Transformer Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP -------->", "nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn", "* 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer +", "SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows =", "# trained ImageNet-22K # 
https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2,", "# reshape feature map x = x.view(B, H, W, C) # 对feature map进行pad,pad到windows", "self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size <", "-*- # author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature", "shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x # 移除Pad数据 if pad_r >", "if self.downsample is not None: x = self.downsample(x, H, W) H, W =", "Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted", "H * W, \"input feature has wrong size\" # Skip Connect shortcut =", "Mh: Windows Size Height # Mw: Windows Size Width # nH: num_heads super(WindowsAttention,", "B, L, C = x.shape assert L == H * W, \"input feature", "from BasicModule import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7", "with shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\"", "= num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels", "self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio #", "48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained", "(slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0 for h", "use_checkpoint=False): \"\"\" Args: dim (int): Number of input channels. 
depth (int): Number of", "Mask. # 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W", "-self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0 for h in", "coding: utf-8 -*- # author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer 1.", "24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): #", "... self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio", "dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path >", "x,H,W = layer(x,H,W) x = self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1,", "x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip Connect x", "end of the layer. Default: None use_checkpoint (bool): Whether to use checkpointing to", "# reverse cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size),", "shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask = None # 窗口划分", "Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 
使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。", "model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth", "def forward(self, x, attn_mask): # feature map的Height & Width H, W = self.H,", "ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2),", "import torch.utils.checkpoint as checkpoint import numpy as np from typing import Optional from", "based multi-head self attention (W-MSA) module with relative position bias. It supports both", "int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4,", "of shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA /", "24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): #", "def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model =", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24,", "Mw: Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size", "-1)) # QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias", "from typing import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import", "slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) #", "= nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m):", "= self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0,", "key, value. Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default:", "C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA / SW-MSA", "\"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 
使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head", "num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_,", "Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W = H,", "Feature Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp", "(tuple[int]): The height and width of the window. num_heads (int): Number of attention", "= (H + 1) // 2, (W + 1) // 2 # DownSample之后,H,W应该减半", ": `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from", "> 0: # 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C)", "(float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default:", "# [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask is", "num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim", "Normalization layer. 
Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the", "nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if", "nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0)", "attn_mask) if self.downsample is not None: x = self.downsample(x, H, W) H, W", "# FFN # 两个Skip Connect x = shortcut + self.drop_path(x) x = x", "224, 224, 3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed(", "* (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] #", "= None # 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw,", "= 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7,", "1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128,", "window_size = 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int", "__init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\"", "SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的", "# 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop =", "relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw,", "Default: 0.0 proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 \"\"\" #", "self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature map的Height", "# QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply ->", "nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads, N, N)", "QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(", "i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 **", "dim self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio", "= relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw]", "\"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size", "(self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224,", "= H, W # self.H = H, self.W = W if not torch.jit.is_scripting()", "(float | tuple[float], optional): Stochastic depth rate. 
Default: 0.0 norm_layer (nn.Module, optional): Normalization", "// 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0", "model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model", "window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse", "embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]", "downsample (nn.Module | None, optional): Downsample layer at the end of the layer.", "blk(x, attn_mask) if self.downsample is not None: x = self.downsample(x, H, W) H,", "channels. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int):", "num_heads, Mh*Mw, Mh*Mw] q = q * self.scale attn = (q @ k.transpose(-2,", "# [B, L, C] x = self.avgpool(x.transpose(1, 2)) # [B, C, 1] x", "Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch", "3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table,", "self.shift_size < self.window_size, \"shift_size must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) #", "key, value. Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float,", "mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
qkv_bias (bool, optional):", "= x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows =", "-> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C", "2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs,", "swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3,", "attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn)", "C, 1] x = torch.flatten(x, 1) x = self.head(x) # 分类头 return x", "cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW,", "nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def", "None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C", "(int): Number of input channels. depth (int): Number of blocks. block数量 num_heads (int):", "rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module,", "(bool, optional): If True, add a learnable bias to query, key, value. Default:", ":] = cnt cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask,", "at the end of the layer. 
Default: None use_checkpoint (bool): Whether to use", "B_, N, C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim]", "- 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise", "-1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw] # [Bs*nW, nH,", "nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N)", "= torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask", "The height and width of the window. num_heads (int): Number of attention heads.", "Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw, 1]", "import numpy as np from typing import Optional from BasicModule import PatchMerging, DropPath,", "12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__()", "window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size)", "= norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim,", "of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp", "x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows,", "def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature", "depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args:", "relative position bias. It supports both of shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了", "torch.utils.checkpoint as checkpoint import numpy as np from typing import Optional from BasicModule", "position bias. It supports both of shifted and non-shifted window. VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based", "DropPath(drop_path) if drop_path > 0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim) #", "v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x if", "Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim]", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6,", "qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers -", "- H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width", "Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] #", "nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end of the", "SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else self.shift_size,", "**kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2,", "0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention(", "query, key, value. 
Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop", "num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained", "# W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] #", "> 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02)", "对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0 pad_r = (self.window_size -", "not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x,", "Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer", "None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt =", "to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :,", "8, 16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841,", "layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__()", "= mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224, 3) # output: (e.g", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3,", "def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model =", "start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0]", "构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i %", "> 0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim", "self.window_size, \"shift_size must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self", "= use_checkpoint # pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer Block self.blocks", "in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4.,", "Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.", "True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout", "C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0 pad_r =", "relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 *", "img_mask[:, h, w, :] = cnt cnt += 1 # Shift Window 混合区域的窗口分割", "optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm", "[batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw,", "if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None:", "position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 *", "0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask", "(float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default:", "self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA) module", "feature map的Height & Width H, W = self.H, self.W # Batch, length, channel", "Map Height W: Feature Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask.", "hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature map的Height & Width H,", "= (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0 for", "Wp, _ = x.shape # 是W-MSA 还是 SW-MSA ? 
# cyclic shift if", "W % self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) %", "+ 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint)", "pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock(", "\"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth,", "else: x = blk(x, attn_mask) if self.downsample is not None: x = self.downsample(x,", "nW = mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] #", "in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn =", "impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030", "nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads self.window_size = window_size", "Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_,", "-0.5 # scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 *", "self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ =", "这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias,", "relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2,", "nn.Dropout(proj_drop) 
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features", "coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] # [2, Mh*Mw, 1] - [2,", "\"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int):", "w, :] = cnt cnt += 1 # Shift Window 混合区域的窗口分割 mask_windows =", "= WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if", "must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn", "isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C] x,H,W", "self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes", "| None, optional): Downsample layer at the end of the layer. 
Default: None", "dim self.window_size = window_size # [Mh, Mw] self.num_heads = num_heads head_dim = dim", "window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int):", "feature map x = x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l", "N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads,", "- mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw,", "**kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K #", "swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3,", "relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] # SW-MSA 需要做attention Mask", "= self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) #", "Args: x: input features with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask", "mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0~window_size\"", "1 # Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw,", "W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C] # 将分割的Windows进行还原", "1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96,", "std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features with shape", "trained ImageNet-1K # 
https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18,", "* self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) #", "mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask =", "4C(stage3) ... self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio =", "self.num_layers = len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) #", "\"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape # qkv(): ->", "Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W',", "0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x) x", "Mh*Mw, Mh*Mw] q = q * self.scale attn = (q @ k.transpose(-2, -1))", "<EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 
使用Windows Multi-Head Self-Attention", "patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim", "= 0 for h in h_slices: for w in w_slices: img_mask[:, h, w,", "Size Height # Mw: Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim", "[3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C //", "output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer", "= window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size *", "0.0 \"\"\" # Mh: Windows Size Height # Mw: Windows Size Width #", "= embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4 x", "Number of input channels. depth (int): Number of blocks. block数量 num_heads (int): Number", "self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj", "Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000,", "- https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn import", "typing import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp", "16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): #", "window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\"", "Args: dim (int): Number of input channels. depth (int): Number of blocks. 
block数量", "[2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:,", "== H * W, \"input feature has wrong size\" # Skip Connect shortcut", "(int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size", "img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1] #", "\"\"\" Args参数定义: dim (int): Number of input channels. num_heads (int): Number of attention", "C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size > 0: x =", "qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() #", "x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H /", "self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def", "如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size,", "Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask): # feature map的Height & Width", "num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn =", "relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv =", "C) # [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) #", "channels. window_size (tuple[int]): The height and width of the window. 
num_heads (int): Number", "index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1])", "x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention", "= torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) #", "None use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. \"\"\"", "shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。", "0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x #", "18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int", "Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)", "2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True,", "C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3)", "= SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),", "self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 ==", "H, W = self.H, self.W # Batch, length, channel B, L, C =", "6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000,", "import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from BasicModule import window_partition, window_reverse", "= int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W / 
self.window_size)) * self.window_size", "BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size,", "= dim self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio =", "dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default:", "rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module |", "checkpointing to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth", "self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp =", "input features with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape", "= (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size),", "Number of input channels. num_heads (int): Number of attention heads. window_size (int): Window", "Mh*Mw, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None", "= window_size // 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads,", "else drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if", "self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的,", "x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B,", "= q * self.scale attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) +", "to query, key, value. Default: True drop (float, optional): Dropout rate. 
Default: 0.0", "W if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x", "C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape:", "patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs)", "[batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q", "relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw]", "of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to", "channels. depth (int): Number of blocks. block数量 num_heads (int): Number of attention heads.", "pad_b = (self.window_size - H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) #", "[nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B,", "N, C) x = self.proj(x) x = self.proj_drop(x) return x if __name__ ==", "slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0 for h in h_slices:", "self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size),", "shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else:", "= norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0", "Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW", "window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) #", "| tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer.", "of blocks. 
block数量 num_heads (int): Number of attention heads. window_size (int): Local window", "embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q", "== 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path,", "swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3,", "/usr/bin/enc python # -*- coding: utf-8 -*- # author: <NAME> # email: <EMAIL>", "self.window_size) # [nW, Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) #", "Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw,", "sum(depths))] # stochastic depth decay rule # bulid layers self.layers = nn.ModuleList() for", "Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim *", "Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop", "(int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable", "layer at the end of the layer. Default: None use_checkpoint (bool): Whether to", "q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] #", "48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中,", "? 
# cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size),", "# 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows", "self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()", "(W + 1) // 2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class", "= self.downsample(x, H, W) H, W = (H + 1) // 2, (W", "Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A", "Shift Window 混合区域的窗口分割 mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1] mask_windows", "(float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic", ":] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw,", "dim (int): Number of input channels. depth (int): Number of blocks. 
block数量 num_heads", "shift_size=0 if (i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,", "batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2,", "# 每个head的dim self.scale = head_dim ** -0.5 # scale # 定义一个parameter table来存放relative position", "attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] # SW-MSA", "window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer <", "24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes", "1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] -", "token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h,", "swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3,", "% self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size x =", "Number of attention heads. window_size (int): Local window size. 
mlp_ratio (float): Ratio of", "attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn =", "# Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1,", "coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords =", "x = x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0", "w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0", "= window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <=", "x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA) module with relative", "1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1)", "attn = self.softmax(attn) attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw,", "__init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of", "0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L,", "# output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,", "attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw,", "B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] *", "rule # bulid layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异", "length, channel B, L, C = x.shape assert L == H * W,", "1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) +", "= 21841, 
**kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7,", "#[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] #", "num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads,", "self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]", "return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth", "_init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is", "indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw] #", "x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage", "= x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window based multi-head self", "- [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0,", "def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model =", "learnable bias to query, key, value. Default: True drop (float, optional): Dropout rate.", "attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input channels. 
num_heads", "checkpoint import numpy as np from typing import Optional from BasicModule import PatchMerging,", "= head_dim ** -0.5 # scale # 定义一个parameter table来存放relative position bias self.relative_position_bias_table =", "\"\"\" Args: x: input features with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf)", "Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True,", "6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int", "* total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: ->", "Patch Merging Layer 类似于Pooling下采样 if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer)", "-> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw,", "import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2", "**kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2,", "= x.shape # 是W-MSA 还是 SW-MSA ? 
# cyclic shift if self.shift_size >", "window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes =", "of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww)", "attn_mask) else: x = blk(x, attn_mask) if self.downsample is not None: x =", "# 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN", "# [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size),", "区域编码 cnt = 0 for h in h_slices: for w in w_slices: img_mask[:,", "# trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2,", "- [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]", "stochastic depth # Drop Path dpr = [x.item() for x in torch.linspace(0, drop_path_rate,", "x W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x", "<NAME> # email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2.", "rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float", "1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128,", "attn_drop (float, optional): Attention dropout rate. 
Default: 0.0 drop_path (float | tuple[float], optional):", "-self.shift_size), slice(-self.shift_size, None)) # 区域编码 cnt = 0 for h in h_slices: for", "# 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C]", "[batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head]", "attn_mask): # feature map的Height & Width H, W = self.H, self.W # Batch,", "optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio", "Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------|", "save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth = depth", "# 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0 pad_r = (self.window_size", "= nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),", "ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2),", "size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.", "depth # Drop Path dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]", "and width of the window. num_heads (int): Number of attention heads. 
qkv_bias (bool,", "for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords", "# 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0,", "2, (W + 1) // 2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\"", "= mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size must in", "shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim", "= nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input", "Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗", "mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # #", "if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear):", "torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask = None #", "shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:,", "self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size must", "\"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model", "num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False,", "1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = 
nn.AdaptiveAvgPool1d(1) self.head =", "[2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative position index for", "**kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): #", "= nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers = BasicLayer(dim=int(embed_dim", "import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as", "# trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2,", "swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3,", "nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim =", "Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices =", "= window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size // 2 #", "def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model =", "W: Feature Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍", "depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin", "norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number of input channels. 
num_heads (int): Number of", "# B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1],", "proj_drop (float, optional): Dropout ratio of output. Default: 0.0 \"\"\" # Mh: Windows", "[nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return", "0 or pad_b > 0: # 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous()", "x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x", "1.0) def forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x)", "if drop_path > 0. else nn.Identity() # LN2 self.norm2 = norm_layer(dim) # MLP", "ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2),", "[nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw]", "in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # bulid layers self.layers", "0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2", "QK^T/sqrt(d) # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows,", "Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成", "permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,", "shifted_x # 移除Pad数据 if pad_r > 0 or pad_b > 0: # 把前面pad的数据移除掉", "attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W):", "# [2, Mh*Mw] # [2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords =", "= attn + 
relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] #", "W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0 pad_r", "qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers", "if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop Path", "model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth", "# Drop Path dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] #", "num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size # [Mh, Mw] self.num_heads", "(Bs, 224, 224, 3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3) self.patch_embed =", "= shifted_x # 移除Pad数据 if pad_r > 0 or pad_b > 0: #", "dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\"", "0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw]", "query, key, value. Default: True attn_drop (float, optional): Dropout ratio of attention weight.", "W, \"input feature has wrong size\" # Skip Connect shortcut = x x", "of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA.", "[Mh, Mw] self.num_heads = num_heads head_dim = dim // num_heads # 每个head的dim self.scale", "Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def", "# 是W-MSA 还是 SW-MSA ? 
# cyclic shift if self.shift_size > 0: shifted_x", "# trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2,", "1) // 2, (W + 1) // 2 # DownSample之后,H,W应该减半 return x, H,", "x: input features with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with", "x.view(B,H*W,C) # FFN # 两个Skip Connect x = shortcut + self.drop_path(x) x =", "[B, L, C] x = self.avgpool(x.transpose(1, 2)) # [B, C, 1] x =", "\"\"\" import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint", "None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes)", "self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index", "the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\"))", "# mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW,", "ratio of output. 
Default: 0.0 \"\"\" # Mh: Windows Size Height # Mw:", "12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs):", "Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1,", "int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4,", "= W if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else:", "**kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2,", "12, 24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):", "dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. 
Default: 0.0 act_layer", "window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of input channels.", "nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import numpy as np", "3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv =", "2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int =", "Hp, Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift", "nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C] x,H,W =", "num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m,", "int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4,", "patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True,", "Wp = int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1,", "size的整数倍 pad_l = 0 pad_t = 0 pad_r = (self.window_size - W %", "窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer: Hierarchical Vision", "[2, Mh*Mw] # [2, Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:,", "# H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1) ->", ":, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1,", "using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn", "from 
https://github.com/microsoft/Swin-Transformer \"\"\" import torch import torch.nn as nn import torch.nn.functional as F", "-> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) ->", "norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample", "[Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias =", "# transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim]", "WindowsAttention(nn.Module): \"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.", "Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None,", "Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float |", "window_size (tuple[int]): The height and width of the window. 
num_heads (int): Number of", "x = self.norm1(x) # reshape feature map x = x.view(B, H, W, C)", "num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight,", "None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr = [x.item()", "1) // 2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\"", "map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1]", "(num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" # [batch_size*num_windows, Mh*Mw,", "BasicModule import PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from BasicModule import window_partition,", "def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12,", "self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4 x 48 ->", "bias to query, key, value. Default: True attn_drop (float, optional): Dropout ratio of", "L, C] x = self.avgpool(x.transpose(1, 2)) # [B, C, 1] x = torch.flatten(x,", "SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA ->", "Number of attention heads. 
qkv_bias (bool, optional): If True, add a learnable bias", "x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x # 移除Pad数据", "\"input feature has wrong size\" # Skip Connect shortcut = x x =", "- coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous()", "self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax", "self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows,", "Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. # 保证Hp和Wp是window_size的整数倍 Hp =", "qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)", "optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU", "x = checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask) if self.downsample is", "input channels. num_heads (int): Number of attention heads. window_size (int): Window size. 
shift_size", "qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in", "total_embed_dim] B_, N, C = x.shape # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 *", "nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W = H, W", "of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights", "& Width H, W = self.H, self.W # Batch, length, channel B, L,", "Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh,", "# LN1 self.norm1 = norm_layer(dim) # Windows_Multi-head Self Attention self.attn = WindowsAttention( dim,", "as np from typing import Optional from BasicModule import PatchMerging, DropPath, PatchEmbed from", "= attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows,", "需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw,", "SW-MSA 需要做attention Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads,", "self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size // 2", "isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch Merging Layer", "Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for", "dim) self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args:", "get pair-wise relative position index for each token inside the window coords_h =", "* self.scale attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B #", "None, optional): Downsample layer at the end of the layer. 
Default: None use_checkpoint", "// num_heads # 每个head的dim self.scale = head_dim ** -0.5 # scale # 定义一个parameter", "a learnable bias to query, key, value. Default: True drop (float, optional): Dropout", "Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape # 是W-MSA 还是 SW-MSA ?", "None # 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C]", "= nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights)", "+ B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0]", "-> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads,", "Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同", "model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model", "# 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W /", "F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape # 是W-MSA", "in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample is not None: self.downsample", "\"\"\" Window based multi-head self attention (W-MSA) module with relative position bias. It", "2C(Stage2) -> H/16 x W/16 x 4C(stage3) ... 
self.num_features = int(embed_dim * 2", "self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W) x = self.norm(x)", "self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] +=", "(C) # H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1)", "patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了", "所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature Map Width x: Feature Map", "attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() # LN2", "self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes >", "\"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input -> LayerNorm ->", "W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size > 0: x", "Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if", "heads. 
qkv_bias (bool, optional): If True, add a learnable bias to query, key,", "Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size #", "self.layers: x,H,W = layer(x,H,W) x = self.norm(x) # [B, L, C] x =", "= self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W) x =", "self.softmax(attn) attn = self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] #", "SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes,", "[B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size >", "num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes:", "self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\" Window based", "= shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module):", "act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. 
Default:", ":, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw,", "w_slices: img_mask[:, h, w, :] = cnt cnt += 1 # Shift Window", "1), device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size),", "**kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2,", "24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage", "self.dim = dim self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio", "H, self.W = W if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk, x,", "nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop", "General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行", "Connect shortcut = x x = self.norm1(x) # reshape feature map x =", "# trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2,", "of input channels. num_heads (int): Number of attention heads. window_size (int): Window size.", "= mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1)", "# shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1", "proj_drop=0.): \"\"\" Args: dim (int): Number of input channels. window_size (tuple[int]): The height", "self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x =", "Attention dropout rate. 
Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0", "= nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr = [x.item() for x", "Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0.,", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16,", "0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :]", "0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] - 1 #", "= self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer", "输出特征矩阵的Channels (C) # H/4 x W/4 x 48 -> H/4 x W/4 x", "56x56, 4x4x3) self.patch_embed = PatchEmbed( patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None)", "patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic", "def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model =", "之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\"", "self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() #", "12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构", "self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self,", 
"self.shift_size = window_size // 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim,", "野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim", "Path dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth", "0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 \"\"\" # Mh:", ":W, :].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip Connect x = shortcut", "self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio #", "# 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i", "0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module,", "window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C]", "features with shape of (num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape of", "x = self.proj(x) x = self.proj_drop(x) return x if __name__ == \"__main__\": pass", "# get pair-wise relative position index for each token inside the window coords_h", "Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是", "torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint", "return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self,", "self.norm1(x) # reshape feature map x = x.view(B, H, W, C) # 对feature", "[2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] #", "* 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input:", "x = x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): 
\"\"\" Window based multi-head", "value. Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0", "int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp,", "self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map", "Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def", "Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2", "# 移除Pad数据 if pad_r > 0 or pad_b > 0: # 把前面pad的数据移除掉 x", "h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size,", "2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if", "# [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H,", "= self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W = H, W # self.H", "保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for", "# nH: num_heads super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size # [Mh,", "self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W)", "应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop", "attn = 
self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) # @: multiply", "self attention (W-MSA) module with relative position bias. It supports both of shifted", "of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add", "上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\"", "return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K #", "3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute:", "# reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size*num_windows,", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6,", "Width x: Feature Map \"\"\" # 为SW-MSA计算Attention Mask. 
# 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H", "x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x", "L == H * W, \"input feature has wrong size\" # Skip Connect", "预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch", "= SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),", "i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample is not None:", "multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head]", "[Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask is not", "in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt", "m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0)", "m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not", "shifted_x = x attn_mask = None # 窗口划分 # Windows Partition x_windows =", "Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2, 6,", "**kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K", "use checkpointing to save memory. Default: False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim", "norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim =", "Number of input channels. window_size (tuple[int]): The height and width of the window.", "tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. 
Default:", "mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw]", "if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features)", "\"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model", "2] relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from", "Args参数定义: dim (int): Number of input channels. num_heads (int): Number of attention heads.", "= attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1,", "num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift", "x, attn_mask): # feature map的Height & Width H, W = self.H, self.W #", "mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn =", "num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int = 1000,", "supports both of shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model:", "-self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))", "self.num_heads = num_heads head_dim = dim // num_heads # 每个head的dim self.scale = head_dim", "-> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q * self.scale attn = (q", "= 0 pad_t = 0 pad_r = (self.window_size - W % self.window_size) %", "1) x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One", "C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch", "downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm =", "rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional):", "self.window_size, Hp, Wp) # [B, H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic", "None)) # 区域编码 cnt = 0 for h in h_slices: for w in", "int = 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4,", "每个head的dim self.scale = head_dim ** -0.5 # scale # 定义一个parameter table来存放relative position bias", "Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp", "if (i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop,", "proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() # LN2 self.norm2", "self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw]", "# [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0] += self.window_size[0] - 1 # shift", "= None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height", "PatchMerging, DropPath, PatchEmbed from BasicModule import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT", "with relative position bias. It supports both of shifted and non-shifted window. VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集", "nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else", "each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords =", "transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x", "x = self.downsample(x, H, W) H, W = (H + 1) // 2,", "attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask def forward(self,x,H,W): # [nW,", "= self.norm1(x) # reshape feature map x = x.view(B, H, W, C) #", "attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size,", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12,", "num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信", "Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def", 
"author: <NAME> # email: <EMAIL> \"\"\" Swin Transformer 1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。", "1 relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\",", "0) else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list)", "+= self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1]", "num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int =", "x = self.avgpool(x.transpose(1, 2)) # [B, C, 1] x = torch.flatten(x, 1) x", "True, add a learnable bias to query, key, value. Default: True attn_drop (float,", "Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self,", "Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。 A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer", "self.window_size = window_size # [Mh, Mw] self.num_heads = num_heads head_dim = dim //", "False. \"\"\" super(BasicLayer, self).__init__() self.dim = dim self.depth = depth self.window_size = window_size", "= window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw,", "Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size = 224", "class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96,", "x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class", "heads. 
window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim", "self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1,", "# qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim] # reshape: -> [batch_size*num_windows, Mh*Mw,", "= coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords", "q * self.scale attn = (q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B", "2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int =", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12,", "= int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp,", "x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape", "@: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw] q = q * self.scale attn", "def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):", "**kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2,", "class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA", "WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path", "C = x.shape assert L == H * W, \"input feature has wrong", "SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 
32), num_classes=num_classes,", "= self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) # @: multiply ->", "input channels. depth (int): Number of blocks. block数量 num_heads (int): Number of attention", "else self.shift_size, #当i为偶,就是W-MSA,i为奇,就是SW-MSA,与论文一致, 保证窗口之间通信 mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else", "(num_windows*B, Mh*Mw, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or", "dim (int): Number of input channels. window_size (tuple[int]): The height and width of", "None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2,", "shortcut = x x = self.norm1(x) # reshape feature map x = x.view(B,", "layer(x,H,W) x = self.norm(x) # [B, L, C] x = self.avgpool(x.transpose(1, 2)) #", "W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift", "patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs)", "2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes:", "1] x = torch.flatten(x, 1) x = self.head(x) # 分类头 return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\"", "for h in h_slices: for w in w_slices: img_mask[:, h, w, :] =", "blocks. block数量 num_heads (int): Number of attention heads. window_size (int): Local window size.", "optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. 
Default:", "mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes", "--------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,", "\"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads self.window_size = window_size self.shift_size", "- 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head", "embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_,", "self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer Block", "BasicModule import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size", "= self.attn_drop(attn) # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: ->", "以14x14个patch为例进行 H: Feature Map Height W: Feature Map Width x: Feature Map \"\"\"", "- 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1,", "Mh*Mw, C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw,", "= 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int =", "1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0,", "Swin Transformer Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP", "nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features with", "to query, key, value. 
Default: True attn_drop (float, optional): Dropout ratio of attention", "blk.H, blk.W = H, W # self.H = H, self.W = W if", "Mask # mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]", "shape of (num_windows, Wh*Ww, Wh*Ww) or None x的输入维度是(num_windows窗口数*Batch Size) 在窗口内进行Attention Op \"\"\" #", "num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer", "32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): #", "drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate.", "\"\"\" Swin Transformer Block包括: Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm->", "0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if", "= 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12,", "类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 
使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows Multi-head", "其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads,", "= nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim)", "# input: (Bs, 224, 224, 3) # output: (e.g patch_size=4: Bs, 56x56, 4x4x3)", "num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin", "H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map Input ->", "int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W / self.window_size)) * self.window_size #", "C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C] #", "**kwargs) return model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def", "self.shift_size), dims=(1, 2)) else: x = shifted_x # 移除Pad数据 if pad_r > 0", "Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x)", "the layer. Default: None use_checkpoint (bool): Whether to use checkpointing to save memory.", "18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model \"\"\"Swin Transformer\"\"\" class", "model \"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K", "x W/16 x 4C(stage3) ... 
self.num_features = int(embed_dim * 2 ** (self.num_layers -", "is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def", "SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes,", "[nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask =", "depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0.,", "relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0", "2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x if __name__", "Feature Map Height W: Feature Map Width x: Feature Map \"\"\" # 为SW-MSA计算Attention", "map的Height和Width _, Hp, Wp, _ = x.shape # 是W-MSA 还是 SW-MSA ? #", "self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches # input: (Bs, 224, 224, 3) # output:", "Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))", "https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8,", "num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float):", "map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t = 0 pad_r = (self.window_size - W", "output. Default: 0.0 \"\"\" # Mh: Windows Size Height # Mw: Windows Size", "nn.init.constant_(m.weight, 1.0) def forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x) x =", "True attn_drop (float, optional): Dropout ratio of attention weight. 
Default: 0.0 proj_drop (float,", "is not None: nW = mask.shape[0] # SW-MSA 需要做attention Mask # mask: [nW,", "None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的,", "value. Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional):", "of input channels. depth (int): Number of blocks. block数量 num_heads (int): Number of", "Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask ==", "# Windows_Multi-head Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,", "// 2, (W + 1) // 2 # DownSample之后,H,W应该减半 return x, H, W", "(q @ k.transpose(-2, -1)) # QK^T/sqrt(d) + B # B: # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH]", "Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2] relative_coords[:, :, 0]", "checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask) if self.downsample is not None:", "Self Attention self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path", "Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA /", "self.window_size, C) # [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)", "shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim (int): Number", "return x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self,", "x x = self.norm1(x) # reshape feature map x = x.view(B, H, W,", "\"\"\" 
SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature Map Width", "2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size, shift_size=0 if", "1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask", "depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module,", "21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192,", "embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model", "= patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4 x 48 -> H/4", "Mh*Mw, 1] - [2, 1, Mh*Mw] relative_coords = coords_flatten[:, :, None] - coords_flatten[:,", "def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias", "patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4 x 48 -> H/4 x", "import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size = 7 img_size =", "0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation", "nH] # 相对位置索引获得方法 # get pair-wise relative position index for each token inside", "Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock,", "super(WindowsAttention, self).__init__() self.dim = dim self.window_size = window_size # [Mh, Mw] self.num_heads =", "mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self,", "embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1,", "Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @", "decay rule # bulid layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): #", "Batch, length, channel B, L, C = x.shape assert L == H *", "depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def", "+ mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn", "trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18,", "\"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=(2, 2,", "trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18,", "block数量 num_heads (int): Number of attention heads. window_size (int): Local window size. 
mlp_ratio", "Layer 类似于Pooling下采样 if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=norm_layer) else: self.downsample", "\"\"\" # Mh: Windows Size Height # Mw: Windows Size Width # nH:", "self.dim = dim self.window_size = window_size # [Mh, Mw] self.num_heads = num_heads head_dim", "Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) return attn_mask", "2 # DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer", "layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end", "cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0.,", "__init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):", "// self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v =", "nn.Dropout(p=drop_rate) # stochastic depth # Drop Path dpr = [x.item() for x in", "dim self.depth = depth self.window_size = window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size", "[batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] q,k,v = qkv.unbind(0) # QK^T/sqrt(d) # transpose: -> [batch_size*num_windows,", "device=x.device) # [1, Hp, Wp, 1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size,", "Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn", "attention heads. window_size (int): Local window size. 
mlp_ratio (float): Ratio of mlp hidden", "self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4 x W/4", "num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841,", "保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp = int(np.ceil(W / self.window_size))", "Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask !=", "= num_heads head_dim = dim // num_heads # 每个head的dim self.scale = head_dim **", "self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn)", "Op \"\"\" # [batch_size*num_windows, Mh*Mw, total_embed_dim] B_, N, C = x.shape # qkv():", "for i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample is not", "Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C] # W-MSA", "drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch", "= H, self.W = W if not torch.jit.is_scripting() and self.use_checkpoint: x = checkpoint.checkpoint(blk,", "num_windows, num_heads, Mh*Mw, Mh*Mw] # # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn", "depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\"", "self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio # shift_size必须小于windows_size assert 0", "norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm", "/ SW-MSA Shift 操作但如果加上 shift 的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个", "= 1000, **kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth model = SwinTransformer(in_chans=3, patch_size=4, 
window_size=7,", "else: shifted_x = x attn_mask = None # 窗口划分 # Windows Partition x_windows", "and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight,", "attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0]", "num_heads head_dim = dim // num_heads # 每个head的dim self.scale = head_dim ** -0.5", "model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24,", "window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias", "embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: -> [batch_size*num_windows, Mh*Mw,", "= len(depths) self.embed_dim = embed_dim self.patch_norm = patch_norm # 输出特征矩阵的Channels (C) # H/4", "attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth", "drop_path, norm_layer=norm_layer) for i in range(depth)]) # Patch Merging Layer 类似于Pooling下采样 if downsample", "(float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If", "Height # Mw: Windows Size Width # nH: num_heads super(WindowsAttention, self).__init__() self.dim =", "shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class WindowsAttention(nn.Module): \"\"\"", "of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. 
Default:", "torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import numpy", "<= self.shift_size < self.window_size, \"shift_size must in 0~window_size\" # LN1 self.norm1 = norm_layer(dim)", "trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18,", "\"\"\"SwinT window_size = 7 img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes:", "\"\"\"Swin-S depths->2,2,18,2 \"\"\" def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K #", "self).__init__() self.dim = dim self.window_size = window_size # [Mh, Mw] self.num_heads = num_heads", "= x attn_mask = None # 窗口划分 # Windows Partition x_windows = window_partition(shifted_x,self.window_size)", "1] # 准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices =", "# Batch, length, channel B, L, C = x.shape assert L == H", "= x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip Connect", "拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp,", "[nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W", "(float, optional): Dropout ratio of output. 
Default: 0.0 \"\"\" # Mh: Windows Size", "int = 21841, **kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4,", "# cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1,", "self.proj_drop = nn.Dropout(proj_drop) nn.init.trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x:", "num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): \"\"\" Args: dim (int): Number of input channels. window_size", "-> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP --------> |--------------------------------------||----------------------| \"\"\" def __init__(self, dim,", "H', W', C] # 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size > 0:", "2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int =", "(self.window_size - H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature", "将image切分为不重合的Patches # input: (Bs, 224, 224, 3) # output: (e.g patch_size=4: Bs, 56x56,", "window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to", "bias. It supports both of shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。", "# [2, Mh*Mw, Mh*Mw] relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2]", "ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2),", "position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w =", "# [2*Mh-1 * 2*Mw-1, nH] # 相对位置索引获得方法 # get pair-wise relative position index", "dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias", "attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0", "True, add a learnable bias to query, key, value. Default: True drop (float,", "torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten = torch.flatten(coords, 1) # [2,", "[nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H',", "H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8", "self).__init__() self.dim = dim self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size", "num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a", "# trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2,", "18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes:", "img_size = 224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs):", "\"\"\" Args: dim (int): Number of input channels. 
window_size (tuple[int]): The height and", "# [nW*B, Mh*Mw, C] # 将分割的Windows进行还原 attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) #", "inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w],", "use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. \"\"\" super(BasicLayer,", "attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn =", "num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] # reshape: ->", "18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int", "model \"\"\"Swin Transformer\"\"\" class SwinTransformer(nn.Module): \"\"\"Swin Transformer结构 这里有个不同之处,就是每个Stage Layer中, \"\"\" def __init__(self, patch_size=4,", "Mh*Mw, Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if", "isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias,", "= self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers: x,H,W =", "self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b))", "int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches #", "H % self.window_size) % self.window_size x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b)) # Hp, Wp代表pad后的feature map的Height和Width _,", "patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受 野就已经很大了。 \"\"\" def", "(nn.Module | None, optional): Downsample layer at the end of the layer. 
Default:", "return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括: Feature Map", "# Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim, dim * 3,", "window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return", "attention (W-MSA) module with relative position bias. It supports both of shifted and", "num_classes=num_classes, **kwargs) return model def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K", "depth (int): Number of blocks. block数量 num_heads (int): Number of attention heads. window_size", "self.window_size)) * self.window_size Wp = int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition", "**kwargs): # trained ImageNet-1K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2,", "class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads,", "# 将image切分为不重合的Patches # input: (Bs, 224, 224, 3) # output: (e.g patch_size=4: Bs,", "window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return", "PatchEmbed from BasicModule import Mlp from BasicModule import window_partition, window_reverse \"\"\"SwinT window_size =", "# 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW,", "[nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask", "# bulid layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 #", "int(dim * mlp_ratio) 
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, attn_mask):", "patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs)", "add a learnable bias to query, key, value. Default: True drop (float, optional):", "norm_layer=norm_layer if self.patch_norm else None) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth # Drop", "Width H, W = self.H, self.W # Batch, length, channel B, L, C", "return model \"\"\"Swin-B\"\"\" def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained ImageNet-1K #", "# shift_size必须小于windows_size assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0~window_size\" #", "18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes: int", "= nn.Softmax(dim=-1) def forward(self,x,mask=None): \"\"\" Args: x: input features with shape of (num_windows*B,", "assert L == H * W, \"input feature has wrong size\" # Skip", "N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) #", "nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask is not None:", "-self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask = None # 窗口划分 #", "C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer in self.layers:", "Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional):", "# [nW*B, Mh, Mw, C] shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B,", "# transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw] # @: multiply -> [batch_size*num_windows, num_heads,", "norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): \"\"\" Args: dim (int): Number of input channels. depth (int):", "optional): Downsample layer at the end of the layer. 
Default: None use_checkpoint (bool):", "的操作,每个 patch 原来只能跟它所在的窗口里的别的 patch 进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的", "if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm):", "drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,", "Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3,", "reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B_, N, C)", "# stochastic depth # Drop Path dpr = [x.item() for x in torch.linspace(0,", "* self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH,", "x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # bulid layers", "window_size self.use_checkpoint = use_checkpoint # pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer", "x \"\"\"一个Stage内的基本SwinTransformer模块\"\"\" class BasicLayer(nn.Module): \"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim,", "# [nW*B, Mh*Mw, C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) #", "\"\"\" One Stage SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4.,", "H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16", "Attention dropout rate. 
Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate.", "2, 18, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-B\"\"\" def", "drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)])", "# mask: [nW, Mh*Mw, Mh*Mw] # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw] #", "1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layers)", "self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() # LN2 self.norm2 =", "VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA Shift 操作但如果加上 shift 的操作,每个", "relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w", "Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer", "2, 18, 2), num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384(num_classes:", "to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to", "mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw] attn = attn.view(B_ // nW, nW, self.num_heads,", "16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): #", "# self.H = H, self.W = W if not torch.jit.is_scripting() and self.use_checkpoint: x", "Mh*Mw] # [Bs*nW, nH, Mh*Mw, Mh*Mw] attn = attn + relative_position_bias.unsqueeze(0) if mask", "-> H/16 x W/16 x 4C(stage3) ... 
self.num_features = int(embed_dim * 2 **", "num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): \"\"\" Args参数定义: dim", "num_heads=(6, 12, 24, 48), num_classes=num_classes, **kwargs) return model def swin_large_patch4_window12_384_in22k(num_classes: int = 21841,", "self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] -", "# [Mh*Mw, Mh*Mw] # Register_buffer: 应该就是在内存中定一个常量,同时,模型保存和加载的时候可以写入和读出。 # 不需要学习,但是可以灵活读写 self.register_buffer(\"relative_position_index\", relative_position_index) self.qkv = nn.Linear(dim,", "进行 交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch", "Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention", "|--------------------------------------||----------------------| \"\"\" def __init__(self, dim, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,", "# [Mh, Mw] self.num_heads = num_heads head_dim = dim // num_heads # 每个head的dim", "交互,但是 shift 之后,这个 patch就可以跟新的窗口里的别的 patch就进行交互了,而这个新的窗 口里所有的 patch 其实来自于上一层别的窗口里的 patch,这也就是作者说的能起到 cross-window connection,就是窗口和窗口之间可以交互了 上述过程配合之后的Patch Merging,合并到Transformer最后几层的时候,每一个patch本身的感受", "self).__init__() self.dim = dim self.depth = depth self.window_size = window_size self.use_checkpoint = use_checkpoint", "**kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs): # trained ImageNet-22K #", "depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=num_classes, **kwargs) return model \"\"\"Swin-S", "SwinTransformer Layer包括: \"\"\" def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,", "# 如果是SW-MSA,需要逆shift过程 # reverse cyclic shift if self.shift_size > 0: 
x = torch.roll(shifted_x,", "= x.view(B, H, W, C) # 对feature map进行pad,pad到windows size的整数倍 pad_l = 0 pad_t", "-> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head] #", "self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)", "Partition x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C] x_windows = x_windows.view(-1, self.window_size*self.window_size,C) #", "pad_r > 0 or pad_b > 0: # 把前面pad的数据移除掉 x = x[:, :H,", "= int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # 将image切分为不重合的Patches", "224 Trained ImageNet-1k depths->2,2,6,2 \"\"\" def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs): # trained", "Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization", "x.shape # 是W-MSA 还是 SW-MSA ? # cyclic shift if self.shift_size > 0:", "N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn)", "**kwargs): # trained ImageNet-22K # https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2,", "downsample(dim=dim, norm_layer=norm_layer) else: self.downsample = None def create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行", "Window based multi-head self attention (W-MSA) module with relative position bias. 
It supports", "8, 16, 32), num_classes=num_classes, **kwargs) return model def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):", "Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end of", "self.use_checkpoint: x = checkpoint.checkpoint(blk, x, attn_mask) else: x = blk(x, attn_mask) if self.downsample", "0: # 把前面pad的数据移除掉 x = x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) #", "(int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio", "1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍; 这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。 2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同 窗口之间的信息传递,通过Shifted Windows", "in self.blocks: blk.H, blk.W = H, W # self.H = H, self.W =", ":, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1]", "create_mask(self,x,H,W): \"\"\" SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的, 他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的, 所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系 以14x14个patch为例进行 H: Feature Map Height W: Feature Map", "Hp, Wp代表pad后的feature map的Height和Width _, Hp, Wp, _ = x.shape # 是W-MSA 还是 SW-MSA", "# [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1] # [nW, Mh*Mw, Mh*Mw] attn_mask", "2)) # [B, C, 1] x = torch.flatten(x, 1) x = self.head(x) #", "layer. 
Default: nn.LayerNorm \"\"\" super(SwinTransformerBlock, self).__init__() self.dim = dim self.num_heads = num_heads self.window_size", "准备进行区域生成,方便生成Mask h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size),", "# DownSample之后,H,W应该减半 return x, H, W \"\"\"一个基本的SwinTransformerBlock的构成Model\"\"\" class SwinTransformerBlock(nn.Module): \"\"\" Swin Transformer Block包括:", "embed_dim_per_head] # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] qkv = self.qkv(x).reshape(B_, N,", "# 多尺度分层Multi-Stage for layer in self.layers: x,H,W = layer(x,H,W) x = self.norm(x) #", "self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw,", "optional): If True, add a learnable bias to query, key, value. Default: True", "@: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head] # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads,", "use_checkpoint # pre-trained self.shift_size = window_size // 2 # 构建SwinTransformer Block self.blocks =", "1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] # 掩码生成 attn_mask", "as F import torch.utils.checkpoint as checkpoint import numpy as np from typing import", "multi-head self attention (W-MSA) module with relative position bias. It supports both of", "# 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp,", "shifted and non-shifted window. 
VIT中注意力是全局的,复杂度随着图片尺寸的增加指数增加,样当去做视觉里的下游任务,尤其是密集 预测型的任务,或者说遇到非常大尺寸的图片时候,这种全局算自注意力的计算复杂度就非常贵了 SwinTransformer中,采用Windows-based Attention来将计算复杂度与图片尺寸的关系变为线性关系。 General Model: W-MSA / SW-MSA", "N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else:", "shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x attn_mask =", "coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2,", "Mh, Mw, 1] mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw] #", "= SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),", "window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0.", "x = x[:, :H, :W, :].contiguous() x = x.view(B,H*W,C) # FFN # 两个Skip", "x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x =", "x:[B, L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) # 多尺度分层Multi-Stage for layer", "num_heads=(4, 8, 16, 32), num_classes=num_classes, **kwargs) return model \"\"\"Swin-Large\"\"\" def swin_large_patch4_window7_224_in22k(num_classes: int =", "attention heads. 
qkv_bias (bool, optional): If True, add a learnable bias to query,", "attn_mask = self.create_mask(x,H,W) for blk in self.blocks: blk.H, blk.W = H, W #", "x.shape assert L == H * W, \"input feature has wrong size\" #", "forward(self,x,H,W): # [nW, Mh*Mw, Mh*Mw] nW:窗口数 attn_mask = self.create_mask(x,H,W) for blk in self.blocks:", "attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else", "# relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH] relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] *", "= torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing=\"ij\")) # [2, Mh, Mw] coords_flatten =", "Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer \"\"\" import torch", "= x.shape assert L == H * W, \"input feature has wrong size\"", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4,", "window_size # [Mh, Mw] self.num_heads = num_heads head_dim = dim // num_heads #", "# 为SW-MSA计算Attention Mask. 
# 保证Hp和Wp是window_size的整数倍 Hp = int(np.ceil(H / self.window_size)) * self.window_size Wp", "self.attn = WindowsAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path)", "# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth model = SwinTransformer(in_chans=3, patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6,", "dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay", "(attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return", "window_size // 2 # 构建SwinTransformer Block self.blocks = nn.ModuleList([ SwinTransformerBlock( dim=dim, num_heads=num_heads, window_size=window_size,", "self.layers = nn.ModuleList() for i_layer in range(self.num_layers): # 注意这里构建的stage和论文图中有些差异 # 这里的stage不包含该stage的patch_merging层,包含的是下个stage的 layers =", "else None, use_checkpoint=use_checkpoint) self.layers.append(layers) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features,", "[B, C, 1] x = torch.flatten(x, 1) x = self.head(x) # 分类头 return", "self.window_size Wp = int(np.ceil(W / self.window_size)) * self.window_size # 拥有和feature map一样的通道排列顺序,方便后续window_partition img_mask =", "C] # W-MSA / SW-MSA attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C]", "Mh*Mw] # 掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] -", "掩码生成 attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw,", "LN2 self.norm2 = norm_layer(dim) # MLP Layer mlp_hidden_dim = int(dim * mlp_ratio) self.mlp", "def forward(self,x): # x:[B, L, C] x,H,W = self.patch_embed(x) x = self.pos_drop(x) #", "assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0~window_size\" # LN1 
self.norm1", "weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 \"\"\"", "self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size x" ]
[ "<reponame>jaywoong/python st1 = '<EMAIL>' print(len(st1)) print(st1.find('.')) print(st1.rfind('.')) print(st1.count('.')) id = st1[:st1.find('@')] print(id) domain", "= '<EMAIL>' print(len(st1)) print(st1.find('.')) print(st1.rfind('.')) print(st1.count('.')) id = st1[:st1.find('@')] print(id) domain = st1[st1.find('@')+1:st1.find('.')]", "st1 = '<EMAIL>' print(len(st1)) print(st1.find('.')) print(st1.rfind('.')) print(st1.count('.')) id = st1[:st1.find('@')] print(id) domain =", "'<EMAIL>' print(len(st1)) print(st1.find('.')) print(st1.rfind('.')) print(st1.count('.')) id = st1[:st1.find('@')] print(id) domain = st1[st1.find('@')+1:st1.find('.')] print(domain)" ]
[ "choices: either a name appearing in the html selector, or a URL (\"link\")", "def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the", "four formats are possible: named color, rgb and rgba, and hexadecimal notation. Examples::", "0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace", "type of objects is at Reeborg's location, the type must be specified as", "RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text previously written using", "Reeborg's location. Args: obj: optional parameter which is the name of an object", "to select a specific world within a program. If the world currently shown", "def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True", "the path followed, so that it is impossible to distinguish between motion to", "\"\"\"Instructs Reeborg to build a wall at the location in front of itself.\"\"\"", "else: carries = 'carries no tokens' return \"UsedRobot at {} {} {}.\".format(location, facing,", "reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") []", "model (images) for the robot. Args: model: a number between 0 and 3.", "shown in the selector instead # of the full url \"\"\" if shortname", "for the robot. More details will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight():", "front of itself. 
\"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries", "'carries %s tokens' % self.body.objects['token'] else: carries = 'carries no tokens' return \"UsedRobot", "RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version", "to create custom world menus. See the documentation for more details. \"\"\" RUR._MakeCustomMenu_(content)", "a number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color", "set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the robot.", "set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\",", "not the specified one, the result is an empty list. Examples: >>> carries_object()", "robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle", "0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the", "set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function allows to change the", "def default_robot(): #py:default_robot \"\"\"Returns a recreated version of the default robot.\"\"\" class Robot(UsedRobot):", "the execution automatically resumes after this time has elapsed. \"\"\" if ms is", "about world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted", "not found, the result is an empty list. 
Examples: >>> reeborg = UsedRobot()", "reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") []", "\"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or", "def put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg carries more than one", "documentation using sphinx, these modules are both # unavailable and not needed try:", "collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__", "RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If an argument (time", "in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self,", "the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def", "== RUR.SOUTH: facing = \"facing South\" if 'token' in self.body.objects: if self.body.objects['token'] ==", "\"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific", "the rest of the program will be ignored. If the desired world is", "\"\"\"Takes an object. If more than one type of objects is at Reeborg's", "except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. 
Is raised when", "self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing East\" elif self.body._orientation == RUR.WEST:", "reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is", "the specified one, the result is an empty list. Examples: >>> carries_object() [\"token\",", "by using this function, the result of running the program will simply be", "Reeborg has reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall", "a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot", "and rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>>", "None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print", "return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg", "def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try:", "of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks", "RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one", "\"\"\"Indicates whether Reeborg carries an object or not. Args: obj: optional parameter which", "\"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def", "x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. 
Args: x: horizontal coordinate; an", "the form # RUR._xyz_ and be defined in commands.js and methods should have", "classes and exceptions that can be included in a Python program for Reeborg's", "\"\"\"Intended primarily for world creators, this function allows to change the default maximum", "to the robot; its value must be a positive integer, or the string", "and methods should appear # alphabetically in this English version, with the exception", "do not translate the name of this function attrs = [] for attr", "goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build", "time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects are", "# No translation needed def put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg", "specified one, the result is an empty list. Examples: >>> reeborg = UsedRobot()", "right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on the", "\"thick\", \"invisible\" and \"default\" are the three possible arguments. \"invisible\" is equivalent to", "is ignored and the rest of the program is executed. If the world", "etc.) blocks the path. Returns: True if the path is clear (not blocked),", "executed. If the world is not already present in the html selector, it", "continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\",", "possible to create custom world menus. See the documentation for more details. 
\"\"\"", "if the specified object is not found, the result is an empty list.", "alphabetically in this English version, with the exception of Python-specific # functions or", "this function allows to set the maximum number of robots allowed in a", "must be a positive integer, or the string \"inf\" to indicate an infinite", "name of an object as a string. Returns: a list of the type", "def move(): #py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images", "a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators,", "sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an", "translate the name of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's", "None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring", "will be raised. \"\"\" if obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool):", "\"\"\" if obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or", "--> Skipping importing from browser for sphinx.\\n\") # All functions from Javascript used", "object to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses", "\"\"\"Indicates if Reeborg is facing North (top of the screen) or not.\"\"\" return", "url \"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR", "methods of a Javascript object.\"\"\" # do not translate the name of this", "the world. Args: bool: True if recording is desired, False otherwise. 
\"\"\" RUR._recording_(bool)", "not translate the name of this function attrs = [] for attr in", "type must be specified as an argument, otherwise an exception will be raised.", "RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring in the world.", "robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the", "if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body,", "executed in a program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb):", "be raised. \"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self):", "added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be", "methods should appear # alphabetically in this English version, with the exception of", "else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water,", "to execution. When disabling highlighting using this function, the extra instructions are still", "motion to the left or to the right, and right handed turns appear", "\"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing East\" elif self.body._orientation", "\"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on the", "shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a", "wall. 
\"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI @property", "= UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\"", "the full url \"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class", "(given as a string) to the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py", "if the path blocked by a wall, False otherwise. \"\"\" return RUR._wall_in_front_() def", "if Reeborg is facing North (top of the screen) or not.\"\"\" return RUR._is_facing_north_()", "Reeborg carries more than one type of objects, the type must be specified", "wall at the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''):", "def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall at the location in", "program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall,", "should have names of # the form RUR._UR.xyz_; functions and methods should appear", "and methods of a Python object, excluding those whose name start with a", "World. Code highlighting occurs thanks to some extra code inserted in a user's", "list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an object.", "(wall, fence, water, etc.) blocks the path. Returns: True if the path is", "ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert from JS", "raised. 
\"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear", "fictitious example # the name http://reeborg.ca/my_world will be added to the selector >>>", "return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function allows", "UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>>", "if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\"))", "can make use of html input. \"\"\" RUR._print_html_(html, append) window['print_html'] = print_html #", "function attrs = [] for attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj,", "the extra instructions are still present, but they will not be if the", "# unavailable and not needed try: from browser import window RUR = window.RUR", "__init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal coordinate;", "is on Reeborg's right, False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended", "not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body)", "return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If Reeborg carries", "reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of", "\"\"\"Indicates if a wall blocks the way. 
Returns: True if the path blocked", "tokens' return \"UsedRobot at {} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate", "Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a Javascript object.\"\"\" #", "right, False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid", "blocked by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates", "is an empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>>", "three possible arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is", "to replace the images used for the robot. More details will be provided", "rest of the program will be ignored. If the desired world is already", "milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns", "RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.)", "left or to the right, and right handed turns appear to be done", "from browser import window RUR = window.RUR except: print(\"\\n --> Skipping importing from", "#py:put \"\"\"Puts down an object. If Reeborg carries more than one type of", "is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put", "all the text previously written using a call to print().\"\"\" RUR._clear_print_() def color_here():", "color, rgb and rgba, and hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>>", "False otherwise. 
\"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing", "Optional parameter; if specified, this will be the name shown in the html", "world within a program. If the world currently shown is different than the", "select a specific world within a program. If the world currently shown is", "#py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_()", "If no object is present, or if the specified object is not found,", "If Reeborg carries no object, or not the specified one, the result is", "RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is", "is centered on the path followed, so that it is impossible to distinguish", "a name appearing in the html selector, or a URL (\"link\") to a", "the colour to a completely transparent value. The \"thick\" style is centered on", "this function allows to change the default maximum number of instructions executed in", "to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a", "world defined on some website. shortname: Optional parameter; if specified, this will be", "#py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top", "more than one type of objects, the type must be specified as an", "object is present, or if the specified object is not found, the result", "in the html selector. Examples: >>> World(\"Home 1\") # world included by default", "dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\",", "found, the result is an empty list. 
Examples: >>> object_here() [\"token\", \"apple\"] >>>", "wall at the location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates", "Reeborg hits a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass class", "# RUR._xyz_ and be defined in commands.js and methods should have names of", "string. Returns: a list of the type of objects carried by Reeborg. If", "#py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def done(): #py: message = \"You", "RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select a specific world within a", ">>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0,", "or to the right, and right handed turns appear to be done all", "automatically resumes after this time has elapsed. \"\"\" if ms is None: RUR._pause_()", "== RUR.NORTH: facing = \"facing North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing", "number of robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color", "except it can make use of html input. \"\"\" RUR._print_html_(html, append) window['print_html'] =", "style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more than one type", "False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is", "RUR._print_html_(html, append) window['print_html'] = print_html # No translation needed def put(obj=None): #py:put \"\"\"Puts", "robot. Args: model: a number between 0 and 3. 
\"\"\" RUR._UR.set_model_(self.body, model) def", "None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall,", "front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object", "(top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward,", "is similar to print() except it can make use of html input. \"\"\"", "model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the trace (oil leak).", "#py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen) or not.\"\"\"", "possible arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it", "different than the one selected by using this function, the result of running", "of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot()", "classes that should appear near the end. def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg", "a world defined on some website. shortname: Optional parameter; if specified, this will", "default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world will be added", "the string \"inf\" to indicate an infinite quantity. \"\"\" if tokens is None:", "can not use done() for this task.\" raise ReeborgError(message) #---- or ------ try:", "not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if", "right of Reeborg. 
Returns: True if an obstacle is on Reeborg's right, False", "from JS list-like object to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def", "it is impossible to distinguish between motion to the left or to the", "the result of running the program will simply be to change the world", "argument, otherwise an exception will be raised. \"\"\" if obj is None: RUR._UR.take_(self.body)", ">>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is not None: ans", "vertical coordinate; an integer greater or equal to 1. orientation (string):, one of", "reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return", "Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>>", "one only looks at the trace. \"\"\" if style not in [\"thick\", \"default\",", "found, the result is an empty list. Examples: >>> reeborg = UsedRobot() >>>", "is on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed", "it possible to create custom world menus. See the documentation for more details.", "#py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj: optional parameter", "trace. \"\"\" if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in", "message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] =", "\"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No translation needed def put(obj=None): #py:put", "robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__", "starts recording changes occuring in the world. 
Args: bool: True if recording is", "\"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg", "Reeborg to build a wall at the location in front of itself.\"\"\" RUR._build_wall_()", "user's program prior to execution. When disabling highlighting using this function, the extra", "coordinate; an integer greater or equal to 1. y: vertical coordinate; an integer", "inserted in a user's program prior to execution. When disabling highlighting using this", "robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js", "object.\"\"\" # do not translate the name of this function RUR._dir_js_(obj) def done():", "except ReeborgError: # ignore a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts", "Reeborg to build a wall at the location in front of itself. \"\"\"", "\"thick\" style is centered on the path followed, so that it is impossible", "images used for the robot. More details will be provided soon. \"\"\" RUR._new_robot_images_(images)", "one type of objects is at Reeborg's location, the type must be specified", "def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used for the robot. More", "the text previously written using a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here", "carries an object or not. Args: obj: optional parameter which is the name", "functions or classes that should appear near the end. def at_goal(): #py:at_goal \"\"\"Indicate", "list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If Reeborg carries more", "The name \"Hello\" will be shown in the selector instead # of the", "a given world. 
\"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the", "window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World.", "- the rest of the program will be ignored. If the desired world", "a string. Returns: a list of the type of objects found. If no", "task.\" raise ReeborgError(message) #---- or ------ try: move() except ReeborgError: # ignore a", "path is clear (not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag", "by Reeborg. If Reeborg carries no object, or not the specified one, the", "or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move", "object, or not the specified one, the result is an empty list. Examples:", "objects found. If no object is present, or if the specified object is", "obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording", "of robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change", "no object, or not the specified one, the result is an empty list.", "ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\"", "use by educators. Makes it possible to create custom world menus. See the", "try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's", "the end. 
def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the desired location.", "if obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self,", "obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front", "distinguish between motion to the left or to the right, and right handed", "the specified one, the result is an empty list. Examples: >>> reeborg =", "0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color", "style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style)", "move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here", "of a Python object, excluding those whose name start with a double underscore", "\"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def", "of tokens.\" else: carries = 'carries %s tokens' % self.body.objects['token'] else: carries =", "in a program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots", "if obj is not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return", "{})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing East\" elif self.body._orientation ==", "whether Reeborg carries an object or not. 
Args: obj: optional parameter which is", "still present, but they will not be if the program is run a", ">>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\")", "{} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached", "False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators,", "attrs = [] for attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)):", "world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace (oil", "Returns: True if an obstacle is on Reeborg's right, False otherwise. \"\"\" return", "a wall is on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content):", "that can be included in a Python program for Reeborg's World. \"\"\" #", "has reached the desired location. Returns: True if Reeborg has reached its goal,", "list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks", "[\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not", "= 'carries no tokens' return \"UsedRobot at {} {} {}.\".format(location, facing, carries) def", "\"\"\"Change the trace style of the robot. Args: style: \"thick\", \"invisible\" and \"default\"", "#py:SI.world_map '''Returns a dict containing information about world. ''' import json return json.loads(RUR.control.get_world_map())", "False otherwise. 
\"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a", "Examples: >>> World(\"Home 1\") # world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious", "#py:default_robot \"\"\"Returns a recreated version of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self):", ">>> object_here(\"banana\") [] \"\"\" if obj is not None: ans = RUR._object_here_(obj) else:", "coordinate; an integer greater or equal to 1. orientation (string):, one of \"e\"", "1. orientation (string):, one of \"e\" or \"east\", \"w\" or \"west\", \"n\" or", "print(\"\\n --> Skipping importing from browser for sphinx.\\n\") # All functions from Javascript", "the type of objects carried by Reeborg. If Reeborg carries no object, or", "in the html selector, it will be added. Args: url: two possible choices:", "True if recording is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove", "in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\",", "is at Reeborg's location, the type must be specified as an argument, otherwise", "Reeborg has reached the desired location. Returns: True if Reeborg has reached its", "this English version, with the exception of Python-specific # functions or classes that", "execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence,", "build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at the location in front", "in commands.js and methods should have names of # the form RUR._UR.xyz_; functions", "RUR = window.RUR except: print(\"\\n --> Skipping importing from browser for sphinx.\\n\") #", "within a program. 
If the world currently shown is different than the one", "# alphabetically in this English version, with the exception of Python-specific # functions", "and hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0,", "looks at the trace. \"\"\" if style not in [\"thick\", \"default\", \"invisible\"]: raise", "not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts", "at Reeborg's location. Args: obj: optional parameter which is the name of an", "not already present in the html selector, it will be added. Args: url:", "a program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended", "orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal coordinate; an integer greater", "a time delay (in milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms) def", "the result is an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object()", "world creators, this function is similar to print() except it can make use", "once, if one only looks at the trace. \"\"\" if style not in", "None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators,", "on Reeborg's right, False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily", "quantity. \"\"\" if tokens is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot", "execution (playback). 
If an argument (time in milliseconds) is given, the execution automatically", "0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self,", "whose name start with a double underscore and are considered to be private.", "ReeborgError(message) #---- or ------ try: move() except ReeborgError: # ignore a collision turn_left()", "a Javascript object.\"\"\" # do not translate the name of this function RUR._dir_js_(obj)", "creators, this function allows to change the default maximum number of instructions executed", "Reeborg has reached its goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall", ">>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else:", "= RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert from JS list-like", "def move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None):", "not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert", "to the right, and right handed turns appear to be done all at", "shortname=None): #py:World \"\"\"Allow to select a specific world within a program. If the", "value must be a positive integer, or the string \"inf\" to indicate an", "near the end. 
def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the desired", "facing North (top of the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag", "All functions from Javascript used below should have names of the form #", ">>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is not None: ans", "in the html selector, or a URL (\"link\") to a world defined on", "def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on", "= \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing East\" elif", "otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall", "on some website. shortname: Optional parameter; if specified, this will be the name", "pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If an argument (time in milliseconds)", "World. \"\"\" # When generating documentation using sphinx, these modules are both #", "from browser for sphinx.\\n\") # All functions from Javascript used below should have", "def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If an argument (time in", "and methods of a Javascript object.\"\"\" # do not translate the name of", "milliseconds) is given, the execution automatically resumes after this time has elapsed. \"\"\"", "appearing in the html selector, or a URL (\"link\") to a world defined", "by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if", "#py:clear_print \"\"\"Erase all the text previously written using a call to print().\"\"\" RUR._clear_print_()", "given world. 
\"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace", "selected by using this function, the result of running the program will simply", "execution. When disabling highlighting using this function, the extra instructions are still present,", "or \"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\" or \"south\". tokens: Initial", "at the trace. \"\"\" if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized", "the corresponding button in Reeborg's World. Code highlighting occurs thanks to some extra", "object or not. Args: obj: optional parameter which is the name of an", "if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall():", "for sphinx.\\n\") # All functions from Javascript used below should have names of", "hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0,", "RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring in", "\"invisible\" and \"default\" are the three possible arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0,", "an obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self,", "one, the result is an empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>>", "def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a Python object, excluding those", "a wall at the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self,", "notation. 
Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>>", "color (string): four formats are possible: named color, rgb and rgba, and hexadecimal", "if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries an infinite", "the result is an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here()", "be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring.", "will be shown in the selector instead # of the full url \"\"\"", "reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0,", "change the default maximum number of instructions executed in a program (1000) by", "specific world within a program. If the world currently shown is different than", "itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object or not.", "a specific world within a program. If the world currently shown is different", "__init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods", "\"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or", "between motion to the left or to the right, and right handed turns", "False otherwise. 
\"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north", "\"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets the", "to distinguish between motion to the left or to the right, and right", "def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at the location in", "instructions executed in a program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def", "RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects are present", "def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function allows to set", "is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation,", "is different than the one selected by using this function, the result of", "defined in commands.js and methods should have names of # the form RUR._UR.xyz_;", "\"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the robot. Args:", ">>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world will be added to", "repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to", "#py:pause \"\"\"Pauses a program's execution (playback). If an argument (time in milliseconds) is", "#py:wall_on_right \"\"\"Indicates if an wall is on the immediate right of Reeborg. 
Returns:", "a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the", "sphinx, these modules are both # unavailable and not needed try: from browser", "world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following def narration(html): raise ReeborgError(\"narration", "is already selected, this command is ignored and the rest of the program", "has elapsed. \"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False):", "right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by", "\"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is not None:", "'carries no tokens' return \"UsedRobot at {} {} {}.\".format(location, facing, carries) def at_goal(self):", "names of # the form RUR._UR.xyz_; functions and methods should appear # alphabetically", "an empty list. Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\")", "custom world menus. See the documentation for more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url,", "both # unavailable and not needed try: from browser import window RUR =", "location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries", "reached its goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg", "an wall is on the immediate right of Reeborg. 
Returns: True if a", "This function has a similar effect to clicking the corresponding button in Reeborg's", "= [] for attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr", "RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function allows to", "the exception of Python-specific # functions or classes that should appear near the", "not tranlate the following def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def", "json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete", "immediate right of Reeborg. Returns: True if an obstacle is on Reeborg's right,", "immediate right of Reeborg. Returns: True if a wall is on Reeborg's right,", "if a wall is on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def", "x: horizontal coordinate; an integer greater or equal to 1. y: vertical coordinate;", "Reeborg's World. Is raised when Reeborg hits a wall. \"\"\" pass try: window['WallCollisionError']", "excluding those whose name start with a double underscore and are considered to", "y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal coordinate; an integer", "return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top of", "print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated", "the name of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution", "ignored and the rest of the program is executed. 
If the world is", "to be done all at once, if one only looks at the trace.", "def put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If Reeborg carries more than", "to be private. \"\"\" # do not translate the name of this function", "#py:World \"\"\"Allow to select a specific world within a program. If the world", ">>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body,", "\"\"\"Puts down an object. If Reeborg carries more than one type of objects,", "= UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\"", "on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for", "If the world currently shown is different than the one selected by using", "else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water,", "ReeborgError: # ignore a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts =", "if the program is run a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here", "= RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a", "otherwise an exception will be raised. \"\"\" if obj is None: RUR._UR.put_(self.body) else:", "\"\"\"Lists attributes and methods of a Python object, excluding those whose name start", "robot. More details will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents", "True if an obstacle is on Reeborg's right, False otherwise. 
\"\"\" return RUR._right_is_clear_()", "if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns", "browser for sphinx.\\n\") # All functions from Javascript used below should have names", "the immediate right of Reeborg. Returns: True if a wall is on Reeborg's", "not. Args: obj: optional parameter which is the name of an object as", "object as a string. Returns: a list of the type of objects found.", "reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\"", "turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if", "default maximum number of instructions executed in a program (1000) by a different", "list of the type of objects carried by Reeborg. If Reeborg carries no", "WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. Is raised when Reeborg hits a", "raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound", "class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. Is raised when Reeborg hits", ">>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style", "carries = \"carries an infinite number of tokens.\" else: carries = 'carries %s", "needed def put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg carries more than", "an obstacle is on Reeborg's right, False otherwise. 
\"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb):", ">>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj", "expression (given as a string) to the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj):", "possible: named color, rgb and rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>>", "model: a number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color):", "is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch", "time delay (in milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms) def turn_left():", "name http://reeborg.ca/my_world will be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The", "If more than one type of objects is at Reeborg's location, the type", "\"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on the immediate right", "= RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location", "= robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation", "self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries an infinite number of tokens.\"", "#py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at the location in front of", "True if the path blocked by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body)", "an argument, otherwise an exception will be raised. \"\"\" if obj is None:", "function is similar to print() except it can make use of html input.", "list. 
Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"]", "grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used for", "exception will be raised. \"\"\" if obj is None: RUR._put_() else: RUR._put_(obj) def", "facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired location.", "at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True if", "optional parameter which is the name of an object as a string. Returns:", "program is run a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether", "for Reeborg's World. \"\"\" # When generating documentation using sphinx, these modules are", "or equal to 1. orientation (string):, one of \"e\" or \"east\", \"w\" or", "\"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World.", "ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is no longer supported;", "this will be the name shown in the html selector. Examples: >>> World(\"Home", "RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the way. 
Returns: True", "[] for attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr +=", "the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water,", "return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the world''' print(RUR.control.get_world_map())", "those whose name start with a double underscore and are considered to be", "reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is", "wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True if the", "is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1,", "either a name appearing in the html selector, or a URL (\"link\") to", "def think(ms): #py:think \"\"\"Set a time delay (in milliseconds) between Reeborg's actions played", "the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one", "ignore a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def", "RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a Javascript", "be done all at once, if one only looks at the trace. \"\"\"", "think(ms): #py:think \"\"\"Set a time delay (in milliseconds) between Reeborg's actions played back.", "#py: message = \"You can not use done() for this task.\" raise ReeborgError(message)", "a Python program for Reeborg's World. \"\"\" # When generating documentation using sphinx,", "See the documentation for more details. 
\"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow", "using sphinx, these modules are both # unavailable and not needed try: from", "location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether", "used for the robot. More details will be provided soon. \"\"\" RUR._new_robot_images_(images) def", "When generating documentation using sphinx, these modules are both # unavailable and not", "orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x,", "to give to the robot; its value must be a positive integer, or", "True if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._at_goal_() def", "False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python", "right handed turns appear to be done all at once, if one only", "one type of objects, the type must be specified as an argument, otherwise", "the trace (oil leak). Args: color (string): four formats are possible: named color,", "for this task.\" raise ReeborgError(message) #---- or ------ try: move() except ReeborgError: #", "of the trace (oil leak). Args: color (string): four formats are possible: named", "set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the", "Javascript used below should have names of the form # RUR._xyz_ and be", "sphinx.\\n\") # All functions from Javascript used below should have names of the", "class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists", "No translation needed def put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg carries", "3. 
\"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the", "a wall is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def", "or \"west\", \"n\" or \"north\", \"s\" or \"south\". tokens: Initial number of tokens", "RUR._UR.xyz_; functions and methods should appear # alphabetically in this English version, with", "otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on", "\"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function", "modules are both # unavailable and not needed try: from browser import window", "shortname: Optional parameter; if specified, this will be the name shown in the", "#py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal coordinate; an integer greater or equal", "build a wall at the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def", "Python-specific # functions or classes that should appear near the end. def at_goal():", "the one selected by using this function, the result of running the program", "resumes after this time has elapsed. \"\"\" if ms is None: RUR._pause_() else:", "run a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type", "of objects carried by Reeborg. If Reeborg carries no object, or not the", "occurring. 
This function has a similar effect to clicking the corresponding button in", "to the left or to the right, and right handed turns appear to", "not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def", "build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall at the location in front", "raise ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is no longer", "\"\"\"Adds a valid Python expression (given as a string) to the watch list.", "#py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict containing information about world. '''", "# The name \"Hello\" will be shown in the selector instead # of", "style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If", "Do not tranlate the following def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\")", "right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on the", "= \"facing West\" elif self.body._orientation == RUR.NORTH: facing = \"facing North\" elif self.body._orientation", "\"\"\"Intended primarily for world creators, this function is similar to print() except it", "None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an", "right of Reeborg. Returns: True if a wall is on Reeborg's right, False", "argument, otherwise an exception will be raised. 
\"\"\" if obj is None: RUR._take_()", "self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of", "WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict", "proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's", "shown is different than the one selected by using this function, the result", "Reeborg carries an object or not. Args: obj: optional parameter which is the", "an exception will be raised. \"\"\" if obj is None: RUR._put_() else: RUR._put_(obj)", "hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\")", "done all at once, if one only looks at the trace. \"\"\" if", "number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change", "the path is clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north():", "argument, otherwise an exception will be raised. \"\"\" if obj is None: RUR._put_()", "style: \"thick\", \"invisible\" and \"default\" are the three possible arguments. \"invisible\" is equivalent", "return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. Makes it possible", "obj is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay", "to the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods", "of objects are present at Reeborg's location. 
Args: obj: optional parameter which is", "West\" elif self.body._orientation == RUR.NORTH: facing = \"facing North\" elif self.body._orientation == RUR.SOUTH:", "# the name http://reeborg.ca/my_world will be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\")", "is facing North (top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self):", "program for Reeborg's World. \"\"\" # When generating documentation using sphinx, these modules", "and are considered to be private. \"\"\" # do not translate the name", "be raised. \"\"\" if obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording", "following def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise ReeborgError(\"say()", "educators. Makes it possible to create custom world menus. See the documentation for", "def done(): #py: message = \"You can not use done() for this task.\"", "reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style", "RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators, this function is", "the form RUR._UR.xyz_; functions and methods should appear # alphabetically in this English", "object. If Reeborg carries more than one type of objects, the type must", "#py:at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True if Reeborg", "class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot.", "\"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate", "is run a second time. 
\"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any", "recording changes occuring in the world. Args: bool: True if recording is desired,", "do not translate the name of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes", "#py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function allows to change the default", "1\") # world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the", "are still present, but they will not be if the program is run", "style is centered on the path followed, so that it is impossible to", "is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle", "html selector, it will be added. Args: url: two possible choices: either a", "parameter which is the name of an object as a string. Returns: a", "time has elapsed. \"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html,", "be included in a Python program for Reeborg's World. \"\"\" # When generating", "\"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top", "present in the html selector, it will be added. Args: url: two possible", "obj=None): #py:UR.put \"\"\"Puts down an object. 
If Reeborg carries more than one type", "\"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code", "#py:SI.print_world_map '''Prints a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not", "of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution to end.\"\"\"", "\"\"\"Intended primarily for world creators, this function allows to set the maximum number", "facing = \"facing East\" elif self.body._orientation == RUR.WEST: facing = \"facing West\" elif", "result of running the program will simply be to change the world -", "is not already present in the html selector, it will be added. Args:", "otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this", "\"north\", \"s\" or \"south\". tokens: Initial number of tokens to give to the", "rgb and rgba, and hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\")", "\"\"\"Allow to select a specific world within a program. If the world currently", "move() except ReeborgError: # ignore a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__", "the path is clear (not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self):", "def recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring in the world. Args:", "color of the trace (oil leak). 
Args: color (string): four formats are possible:", "RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the", "def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def", "message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class", "facing = \"facing North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing South\" if", "make use of html input. \"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No", "return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a Javascript object.\"\"\"", "RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the robot. Args: style:", "list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text previously written using a call", "at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True if", "in a user's program prior to execution. When disabling highlighting using this function,", "'inf': carries = \"carries an infinite number of tokens.\" else: carries = 'carries", "the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if", ">>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style):", "#py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing", "by educators. Makes it possible to create custom world menus. 
See the documentation", "# world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name", "0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"):", "(time in milliseconds) is given, the execution automatically resumes after this time has", "function allows to set the maximum number of robots allowed in a given", "def World(url, shortname=None): #py:World \"\"\"Allow to select a specific world within a program.", "\"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of the", "a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the", "the robot. More details will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight", "\"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes", "orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def", "at the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object", "#py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given as a string)", "an object. If Reeborg carries more than one type of objects, the type", "that it is impossible to distinguish between motion to the left or to", "import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the", "an integer greater or equal to 1. y: vertical coordinate; an integer greater", "equal to 1. 
y: vertical coordinate; an integer greater or equal to 1.", "\"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace", "#py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a", "style): #py:UR.set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\", \"invisible\"", "# do not translate the name of this function attrs = [] for", "number of tokens to give to the robot; its value must be a", "and exceptions that can be included in a Python program for Reeborg's World.", "so that it is impossible to distinguish between motion to the left or", "return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top of", "return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text previously written using a", "return \"UsedRobot at {} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if", "value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function", "RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for the robot. Args:", "[\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body, obj))", "0, 0, 0)\"), that is it sets the colour to a completely transparent", "of the full url \"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname)", "self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if", "program prior to execution. 
When disabling highlighting using this function, the extra instructions", "this function is similar to print() except it can make use of html", "name of this function attrs = [] for attr in dir(obj): if attr.startswith(\"__\"):", "needed try: from browser import window RUR = window.RUR except: print(\"\\n --> Skipping", "more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select a specific", "is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay (in", "code highlighting from occurring. This function has a similar effect to clicking the", "when Reeborg hits a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass", "to change the default maximum number of instructions executed in a program (1000)", "specific to Reeborg's World. Is raised when Reeborg hits a wall. \"\"\" pass", "in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one grid position.\"\"\"", "#py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version of the default", "list of the type of objects found. If no object is present, or", "{}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired", "Returns: a list of the type of objects carried by Reeborg. If Reeborg", "transparent value. The \"thick\" style is centered on the path followed, so that", "def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is", "[\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is not", "an exception will be raised. \"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body,", "1. y: vertical coordinate; an integer greater or equal to 1. 
orientation (string):,", "model): #py:UR.set_model \"\"\"Select the model (images) for the robot. Args: model: a number", "world is not already present in the html selector, it will be added.", "def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal", "def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. This function has a similar", "source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a", "\"\"\"Allow to replace the images used for the robot. More details will be", "a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self):", "return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for the robot.", "a dict containing information about world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self):", "if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style)", "#py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x:", "#py:no_highlight \"\"\"Prevents code highlighting from occurring. This function has a similar effect to", "#py:add_watch \"\"\"Adds a valid Python expression (given as a string) to the watch", "used below should have names of the form # RUR._xyz_ and be defined", "an obstacle (wall, fence, water, etc.) blocks the path. Returns: True if the", "down an object. If Reeborg carries more than one type of objects, the", "trace (oil leak). Args: color (string): four formats are possible: named color, rgb", "occurs thanks to some extra code inserted in a user's program prior to", "an obstacle (wall, fence, water, etc.) 
is on the immediate right of Reeborg.", "rgb and rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\")", "equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets the colour to", "= print_html # No translation needed def put(obj=None): #py:put \"\"\"Puts down an object.", "#py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function allows to set the maximum", "else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def", "will simply be to change the world - the rest of the program", "False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images)", "its goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to", "def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing", "function, the extra instructions are still present, but they will not be if", "only looks at the trace. \"\"\" if style not in [\"thick\", \"default\", \"invisible\"]:", "robot. Args: style: \"thick\", \"invisible\" and \"default\" are the three possible arguments. \"invisible\"", "False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is", "world_map(self): #py:SI.world_map '''Returns a dict containing information about world. 
''' import json return", "remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear", "raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an", "creators, this function allows to set the maximum number of robots allowed in", "as an argument, otherwise an exception will be raised. \"\"\" if obj is", "reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body,", "[] \"\"\" if obj is not None: ans = RUR._object_here_(obj) else: ans =", "\"south\". tokens: Initial number of tokens to give to the robot; its value", "wall is on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu", "location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = \"facing East\"", "is clear (not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return", "the documentation for more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to", "browser import window RUR = window.RUR except: print(\"\\n --> Skipping importing from browser", "Args: color (string): four formats are possible: named color, rgb and rgba, and", "'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries an infinite number", "of # the form RUR._UR.xyz_; functions and methods should appear # alphabetically in", "\"\"\"Indicate if Reeborg has reached the desired location. Returns: True if Reeborg has", "# convert from JS list-like object to proper Python list def paint_square(color): #py:paint_square", "as a string) to the watch list. 
\"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists", "= message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass", "possible choices: either a name appearing in the html selector, or a URL", "\"\"\"Instructs Reeborg to build a wall at the location in front of itself.", "RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates", "otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North", "import window RUR = window.RUR except: print(\"\\n --> Skipping importing from browser for", "contains functions, classes and exceptions that can be included in a Python program", "Args: x: horizontal coordinate; an integer greater or equal to 1. y: vertical", "dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the", "RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function allows to", "the program will be ignored. If the desired world is already selected, this", "def clear_print(): #py:clear_print \"\"\"Erase all the text previously written using a call to", "return list(ans) # convert from JS list-like object to proper Python list def", "UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if", "# fictitious example # the name http://reeborg.ca/my_world will be added to the selector", "the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be shown in", "\"s\" or \"south\". 
tokens: Initial number of tokens to give to the robot;", "Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if", "#py:UR.object_here \"\"\"Indicates whether any type of objects are present at Reeborg's location. Args:", "give to the robot; its value must be a positive integer, or the", "World. Examples:: def done(): #py: message = \"You can not use done() for", "is on the immediate right of Reeborg. Returns: True if an obstacle is", "running the program will simply be to change the world - the rest", "(not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def", "no tokens' return \"UsedRobot at {} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal", "return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on the immediate", "reached the desired location. Returns: True if Reeborg has reached its goal, False", "is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its", ">>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj", "the result is an empty list. 
Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\")", "else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators, this function", "self.body._orientation == RUR.EAST: facing = \"facing East\" elif self.body._orientation == RUR.WEST: facing =", "object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects are present at Reeborg's location.", "callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class", "or \"north\", \"s\" or \"south\". tokens: Initial number of tokens to give to", "the following def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise", "Reeborg's right, False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for", "color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of the robot. Args:", "RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace (oil leak). Args:", "set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\", \"invisible\"", "on the immediate right of Reeborg. Returns: True if a wall is on", "RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location =", "reeborg.object_here(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return", ">>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\")", "it can make use of html input. 
\"\"\" RUR._print_html_(html, append) window['print_html'] = print_html", "tokens: Initial number of tokens to give to the robot; its value must", "to a world defined on some website. shortname: Optional parameter; if specified, this", "object. If more than one type of objects is at Reeborg's location, the", "# Do not tranlate the following def narration(html): raise ReeborgError(\"narration is obsolete; use", "of the form # RUR._xyz_ and be defined in commands.js and methods should", "the immediate right of Reeborg. Returns: True if an obstacle is on Reeborg's", "True if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def", "object, excluding those whose name start with a double underscore and are considered", "for the robot. Args: model: a number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body,", "by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type", "ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text previously", "in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more", "the name shown in the html selector. Examples: >>> World(\"Home 1\") # world", "wall, False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall", "empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\")", "of an object as a string. Returns: a list of the type of", "delay (in milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left", "#py:object_here \"\"\"Indicates whether any type of objects are present at Reeborg's location. Args:", "the three possible arguments. 
\"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that", "added. Args: url: two possible choices: either a name appearing in the html", "\"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None:", "attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to", "return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.)", "a valid Python expression (given as a string) to the watch list. \"\"\"", "it will be added. Args: url: two possible choices: either a name appearing", "ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object.", "with a double underscore and are considered to be private. \"\"\" # do", "def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen)", "function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def", "return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at the", "[] \"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body))", "done() for this task.\" raise ReeborgError(message) #---- or ------ try: move() except ReeborgError:", "world. Args: bool: True if recording is desired, False otherwise. 
\"\"\" RUR._recording_(bool) def", "#py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions", "clear_print(): #py:clear_print \"\"\"Erase all the text previously written using a call to print().\"\"\"", "allows to change the default maximum number of instructions executed in a program", "attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\",", "url: two possible choices: either a name appearing in the html selector, or", "program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily", "except: print(\"\\n --> Skipping importing from browser for sphinx.\\n\") # All functions from", "if obj is not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return", "a list of the type of objects carried by Reeborg. If Reeborg carries", "Args: url: two possible choices: either a name appearing in the html selector,", "are possible: named color, rgb and rgba, and hexadecimal notation. Examples:: >>> reeborg", "\"\"\"Pauses a program's execution (playback). If an argument (time in milliseconds) is given,", "the world is not already present in the html selector, it will be", "input. \"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No translation needed def put(obj=None):", "carries more than one type of objects, the type must be specified as", "the world - the rest of the program will be ignored. 
If the", "robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation ==", "the name of this function attrs = [] for attr in dir(obj): if", "= RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text previously written", "set the maximum number of robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb)", "otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall", "previously written using a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_()", "are both # unavailable and not needed try: from browser import window RUR", "True if the path blocked by a wall, False otherwise. \"\"\" return RUR._wall_in_front_()", "elif self.body._orientation == RUR.WEST: facing = \"facing West\" elif self.body._orientation == RUR.NORTH: facing", "None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down", "recreated version of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_()", "is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen) or", "recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring in the world. Args: bool:", "the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move():", "integer greater or equal to 1. orientation (string):, one of \"e\" or \"east\",", "if specified, this will be the name shown in the html selector. 
Examples:", "def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates", ">>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the", "methods of a Python object, excluding those whose name start with a double", "turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return", "the program is run a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates", "obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model):", "RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. If more than one type of", "North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing South\" if 'token' in self.body.objects:", "arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets", "\"facing North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing South\" if 'token' in", "0)\"), that is it sets the colour to a completely transparent value. The", "this time has elapsed. 
\"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms) def", "not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool):", "narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is no", "all robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an", "return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle", "\"\"\" if obj is not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_()", "True if the path is clear (not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body)", "\"\"\" if obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def", "put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If Reeborg carries more than one", "def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes", "Python object, excluding those whose name start with a double underscore and are", "path followed, so that it is impossible to distinguish between motion to the", "color): #py:UR.set_trace_color \"\"\"Change the color of the trace (oil leak). Args: color (string):", "execution automatically resumes after this time has elapsed. 
\"\"\" if ms is None:", "to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be shown", "[\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is not None: ans = RUR._carries_object_(obj)", "in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound", "the path blocked by a wall, False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right():", "#py:UR.take \"\"\"Takes an object. If more than one type of objects is at", "string \"inf\" to indicate an infinite quantity. \"\"\" if tokens is None: robot", "otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression", "function, the result of running the program will simply be to change the", "be raised. \"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self):", "of Reeborg. Returns: True if an obstacle is on Reeborg's right, False otherwise.", "a program. If the world currently shown is different than the one selected", "#py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object.", "this task.\" raise ReeborgError(message) #---- or ------ try: move() except ReeborgError: # ignore", "#py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the path. Returns:", "0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style", "print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's", "an exception will be raised. 
\"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body,", "the color of the trace (oil leak). Args: color (string): four formats are", "(string):, one of \"e\" or \"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\"", "will be raised. \"\"\" if obj is None: RUR._take_() else: RUR._take_(obj) def think(ms):", "already present in the html selector, it will be added. Args: url: two", "\"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\" or \"south\". tokens: Initial number", "object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is", "North (top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move", "appear # alphabetically in this English version, with the exception of Python-specific #", "0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace", "to 1. y: vertical coordinate; an integer greater or equal to 1. orientation", "has reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs", "left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way. Returns:", "view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front():", "the html selector, or a URL (\"link\") to a world defined on some", "desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in", "html input. 
\"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No translation needed def", "0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def", "\"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function allows", "\"n\" or \"north\", \"s\" or \"south\". tokens: Initial number of tokens to give", "website. shortname: Optional parameter; if specified, this will be the name shown in", "= RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all", "or ------ try: move() except ReeborgError: # ignore a collision turn_left() \"\"\" def", "no object is present, or if the specified object is not found, the", "more than one type of objects is at Reeborg's location, the type must", "[\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is not None: ans = RUR._object_here_(obj)", "to build a wall at the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body)", "\"west\", \"n\" or \"north\", \"s\" or \"south\". tokens: Initial number of tokens to", "#py:recording \"\"\"Stops or starts recording changes occuring in the world. Args: bool: True", "else: ans = RUR._object_here_() return list(ans) # convert from JS list-like object to", ">>> carries_object(\"banana\") [] \"\"\" if obj is not None: ans = RUR._carries_object_(obj) else:", "\"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def done(): #py:", "# All functions from Javascript used below should have names of the form", "number of tokens.\" else: carries = 'carries %s tokens' % self.body.objects['token'] else: carries", "if obj is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time", "#py:wall_in_front \"\"\"Indicates if a wall blocks the way. 
Returns: True if the path", "the desired location. Returns: True if Reeborg has reached its goal, False otherwise.", "the name http://reeborg.ca/my_world will be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") #", "selected, this command is ignored and the rest of the program is executed.", "this function, the extra instructions are still present, but they will not be", "copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following def", "is the name of an object as a string. Returns: a list of", "on the path followed, so that it is impossible to distinguish between motion", "default_robot(): #py:default_robot \"\"\"Returns a recreated version of the default robot.\"\"\" class Robot(UsedRobot): def", "blocked by a wall, False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates", "\"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. If", "path blocked by a wall, False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right", "print_html # No translation needed def put(obj=None): #py:put \"\"\"Puts down an object. If", "True if a wall is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body)", "pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. Is raised when Reeborg", "specified as an argument, otherwise an exception will be raised. \"\"\" if obj", "self.body.objects['token'] else: carries = 'carries no tokens' return \"UsedRobot at {} {} {}.\".format(location,", "else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__", "which is the name of an object as a string. Returns: a list", "tokens to give to the robot; its value must be a positive integer,", "(1000) by a different value. 
\"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for", "methods should have names of # the form RUR._UR.xyz_; functions and methods should", "corresponding button in Reeborg's World. Code highlighting occurs thanks to some extra code", "a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of", "Reeborg's right, False otherwise. \"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a", "dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images):", "pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict containing information", "obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj: optional", "that should appear near the end. def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has", "def done(): #py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear", "world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy", "\"\"\"Exceptions specific to Reeborg's World. Is raised when Reeborg hits a wall. \"\"\"", "argument (time in milliseconds) is given, the execution automatically resumes after this time", "= RUR._object_here_() return list(ans) # convert from JS list-like object to proper Python", "should have names of the form # RUR._xyz_ and be defined in commands.js", "desired world is already selected, this command is ignored and the rest of", "@property def world_map(self): #py:SI.world_map '''Returns a dict containing information about world. ''' import", "RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. 
This function has a", "the robot. Args: model: a number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model)", "importing from browser for sphinx.\\n\") # All functions from Javascript used below should", "of the type of objects carried by Reeborg. If Reeborg carries no object,", "style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool)", "provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. This", "UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if", "(\"link\") to a world defined on some website. shortname: Optional parameter; if specified,", "the left or to the right, and right handed turns appear to be", "is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording changes", "= \"facing North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing South\" if 'token'", "def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world creators, this function allows to change", "style of the robot. Args: style: \"thick\", \"invisible\" and \"default\" are the three", "type of objects found. If no object is present, or if the specified", "rgba, and hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125,", "fence, water, etc.) blocks the path. Returns: True if the path is clear", "def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on the immediate right of", "ignored. If the desired world is already selected, this command is ignored and", "start with a double underscore and are considered to be private. 
\"\"\" #", "\"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for the", "Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution", "translate the name of this function attrs = [] for attr in dir(obj):", "if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self,", "RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST:", "obstacle (wall, fence, water, etc.) blocks the path. Returns: True if the path", "is given, the execution automatically resumes after this time has elapsed. \"\"\" if", "world is already selected, this command is ignored and the rest of the", "be specified as an argument, otherwise an exception will be raised. \"\"\" if", "will be ignored. If the desired world is already selected, this command is", "the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of", "RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of the robot.", "specific to Reeborg's World. Examples:: def done(): #py: message = \"You can not", "forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any", "# the form RUR._UR.xyz_; functions and methods should appear # alphabetically in this", "wall is on the immediate right of Reeborg. Returns: True if a wall", "command is ignored and the rest of the program is executed. If the", "whether any type of objects are present at Reeborg's location. 
Args: obj: optional", "else: carries = 'carries %s tokens' % self.body.objects['token'] else: carries = 'carries no", "specified one, the result is an empty list. Examples: >>> carries_object() [\"token\", \"apple\"]", "the selector instead # of the full url \"\"\" if shortname is None:", "def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given as a string) to", "of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by", "location. Args: obj: optional parameter which is the name of an object as", "be private. \"\"\" # do not translate the name of this function attrs", "a program's execution (playback). If an argument (time in milliseconds) is given, the", "\"default\" are the three possible arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0,", "desired location. Returns: True if Reeborg has reached its goal, False otherwise. \"\"\"", "tokens' % self.body.objects['token'] else: carries = 'carries no tokens' return \"UsedRobot at {}", "RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay (in milliseconds) between Reeborg's actions", "Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>>", "pass try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def world_map(self):", "def world_map(self): #py:SI.world_map '''Returns a dict containing information about world. ''' import json", "empty list. Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") []", "given, the execution automatically resumes after this time has elapsed. \"\"\" if ms", "that is it sets the colour to a completely transparent value. The \"thick\"", "is not found, the result is an empty list. 
Examples: >>> object_here() [\"token\",", "Skipping importing from browser for sphinx.\\n\") # All functions from Javascript used below", "\"\"\"Designed for use by educators. Makes it possible to create custom world menus.", "More details will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code", "be added. Args: url: two possible choices: either a name appearing in the", "one, the result is an empty list. Examples: >>> reeborg = UsedRobot() >>>", "RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def front_is_clear():", "% self.body.objects['token'] else: carries = 'carries no tokens' return \"UsedRobot at {} {}", "allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color", "elif self.body._orientation == RUR.SOUTH: facing = \"facing South\" if 'token' in self.body.objects: if", "empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") []", "between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the", "tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args: x: horizontal coordinate; an integer greater or", "y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body)", "__str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE", "message = \"You can not use done() for this task.\" raise ReeborgError(message) #----", "done(): #py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates", "#py:think \"\"\"Set a time delay (in milliseconds) between Reeborg's actions played back. 
\"\"\"", "an object. If more than one type of objects is at Reeborg's location,", "if the path is clear (not blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def", "\"w\" or \"west\", \"n\" or \"north\", \"s\" or \"south\". tokens: Initial number of", "in the selector instead # of the full url \"\"\" if shortname is", "new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used for the robot. More details", "\"\"\"Change the color of the trace (oil leak). Args: color (string): four formats", "shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1,", "[\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate", "%s tokens' % self.body.objects['token'] else: carries = 'carries no tokens' return \"UsedRobot at", "is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world", "this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_()", ">>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj", "def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If", "not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward,", "result is an empty list. 
Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\",", "type of objects, the type must be specified as an argument, otherwise an", "RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body)", "changes occuring in the world. Args: bool: True if recording is desired, False", "program. If the world currently shown is different than the one selected by", "if an obstacle (wall, fence, water, etc.) is on the immediate right of", "not found, the result is an empty list. Examples: >>> object_here() [\"token\", \"apple\"]", "primarily for world creators, this function is similar to print() except it can", "have names of the form # RUR._xyz_ and be defined in commands.js and", "second time. \"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects", "objects, the type must be specified as an argument, otherwise an exception will", "the world currently shown is different than the one selected by using this", "example # the name http://reeborg.ca/my_world will be added to the selector >>> World(\"http://reeborg.ca/my_world\",", "the images used for the robot. More details will be provided soon. \"\"\"", "RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a Python object, excluding", "raised. \"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left", "exception will be raised. \"\"\" if obj is None: RUR._take_() else: RUR._take_(obj) def", "otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates", "ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. 
Is raised", "carries no object, or not the specified one, the result is an empty", "extra instructions are still present, but they will not be if the program", "\"\"\" if obj is not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_()", "sets the colour to a completely transparent value. The \"thick\" style is centered", "a recreated version of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body =", "def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more than one type of", "== RUR.WEST: facing = \"facing West\" elif self.body._orientation == RUR.NORTH: facing = \"facing", ">>> World(\"Home 1\") # world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example", "move(): #py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow", "Reeborg is facing North (top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def", "this command is ignored and the rest of the program is executed. If", "Args: model: a number between 0 and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self,", "add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given as a string) to the", "def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. Makes it possible to create", "in the world. Args: bool: True if recording is desired, False otherwise. \"\"\"", "list(ans) # convert from JS list-like object to proper Python list def paint_square(color):", "an empty list. 
Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>>", "to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the", "this function attrs = [] for attr in dir(obj): if attr.startswith(\"__\"): continue if", "attributes and methods of a Javascript object.\"\"\" # do not translate the name", "#py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError", "unavailable and not needed try: from browser import window RUR = window.RUR except:", "#py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source", "and not needed try: from browser import window RUR = window.RUR except: print(\"\\n", "change the world - the rest of the program will be ignored. If", "obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is", "and \"default\" are the three possible arguments. \"invisible\" is equivalent to set_trace_color(\"rgba(0, 0,", "front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the path.", "present, or if the specified object is not found, the result is an", "currently shown is different than the one selected by using this function, the", ">>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not None: return", "at {} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has", "Returns: True if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._at_goal_()", "to print() except it can make use of html input. 
\"\"\" RUR._print_html_(html, append)", "in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence,", "after this time has elapsed. \"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms)", "y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({},", "selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be shown in the", "way. Returns: True if the path blocked by a wall, False otherwise. \"\"\"", "to a completely transparent value. The \"thick\" style is centered on the path", "\"\"\" if obj is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a", "RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is", "if recording is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all", "\"\"\"Set a time delay (in milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms)", "\"\"\" RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects are present", "set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\"", "notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\")", "the trace style of the robot. 
Args: style: \"thick\", \"invisible\" and \"default\" are", "turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of", "\"\"\" if tokens is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot =", "wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True if the", "if ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily", "def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear():", "the program is executed. If the world is not already present in the", "carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns:", "of Python-specific # functions or classes that should appear near the end. def", "list-like object to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause", "disabling highlighting using this function, the extra instructions are still present, but they", "raised. \"\"\" if obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops", "if obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self):", "has reached its goal, False otherwise. \"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs", "RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self,", "= \"You can not use done() for this task.\" raise ReeborgError(message) #---- or", "take(obj=None): #py:take \"\"\"Takes an object. 
If more than one type of objects is", "end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.)", "deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. If more than", "commands.js and methods should have names of # the form RUR._UR.xyz_; functions and", "#py:UR.set_model \"\"\"Select the model (images) for the robot. Args: model: a number between", "if one only looks at the trace. \"\"\" if style not in [\"thick\",", "an empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\")", "is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets the colour", "result is an empty list. Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"]", "def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True", ">>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color)", "0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style", "list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\"", "names of the form # RUR._xyz_ and be defined in commands.js and methods", "RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None):", "False otherwise. 
\"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a", "thanks to some extra code inserted in a user's program prior to execution.", "RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert from JS list-like object", "world creators, this function allows to set the maximum number of robots allowed", "self.body._orientation == RUR.WEST: facing = \"facing West\" elif self.body._orientation == RUR.NORTH: facing =", "empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\")", "all at once, if one only looks at the trace. \"\"\" if style", "#py:UR.put \"\"\"Puts down an object. If Reeborg carries more than one type of", "return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version of the default robot.\"\"\"", "any type of objects are present at Reeborg's location. Args: obj: optional parameter", "def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\",", "window.RUR except: print(\"\\n --> Skipping importing from browser for sphinx.\\n\") # All functions", "return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given as", "Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the way.", "East\" elif self.body._orientation == RUR.WEST: facing = \"facing West\" elif self.body._orientation == RUR.NORTH:", "a user's program prior to execution. When disabling highlighting using this function, the", "#py:WCE \"\"\"Exceptions specific to Reeborg's World. Is raised when Reeborg hits a wall.", "carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj: optional", "by a wall, False otherwise. 
\"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if", "= WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a", "selector, it will be added. Args: url: two possible choices: either a name", "the specified object is not found, the result is an empty list. Examples:", "its value must be a positive integer, or the string \"inf\" to indicate", "of Reeborg. Returns: True if a wall is on Reeborg's right, False otherwise.", "than the one selected by using this function, the result of running the", "try: move() except ReeborgError: # ignore a collision turn_left() \"\"\" def __init__(self, message):", "build a wall at the location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None):", "English version, with the exception of Python-specific # functions or classes that should", "Reeborg carries no object, or not the specified one, the result is an", "dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a Python object, excluding those whose", "by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images", "greater or equal to 1. y: vertical coordinate; an integer greater or equal", "#py:remove_robots \"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates", "object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is not None: ans =", "if Reeborg is facing North (top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body)", "extra code inserted in a user's program prior to execution. When disabling highlighting", "specified object is not found, the result is an empty list. Examples: >>>", "(playback). If an argument (time in milliseconds) is given, the execution automatically resumes", "or not. 
Args: obj: optional parameter which is the name of an object", "parameter; if specified, this will be the name shown in the html selector.", "reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color)", "Reeborg's World. Examples:: def done(): #py: message = \"You can not use done()", "json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the world'''", "a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI", "path is clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north", "use print_html().\") def say(): raise ReeborgError(\"say() is no longer supported; use print() instead.\")", "its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way.", "Reeborg is facing North (top of the screen) or not.\"\"\" return RUR._is_facing_north_() def", "dict containing information about world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map", "position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used for the", "\"\"\"Erase all the text previously written using a call to print().\"\"\" RUR._clear_print_() def", "Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if", "carries = 'carries no tokens' return \"UsedRobot at {} {} {}.\".format(location, facing, carries)", "a completely transparent value. 
The \"thick\" style is centered on the path followed,", "of a Javascript object.\"\"\" # do not translate the name of this function", "of the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def", "reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body,", "return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an", "ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def done(): #py: message =", "#py:dir_py \"\"\"Lists attributes and methods of a Python object, excluding those whose name", "be shown in the selector instead # of the full url \"\"\" if", "set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more than", "# functions or classes that should appear near the end. def at_goal(): #py:at_goal", "be raised. \"\"\" if obj is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think", "path blocked by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right", "on the immediate right of Reeborg. Returns: True if an obstacle is on", "one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of", "(string): four formats are possible: named color, rgb and rgba, and hexadecimal notation.", "#py:UR.set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\", \"invisible\" and", "otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for", "found. 
If no object is present, or if the specified object is not", "form # RUR._xyz_ and be defined in commands.js and methods should have names", "recording is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots", "the type of objects found. If no object is present, or if the", "Returns: True if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body)", ").replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def", "(wall, fence, water, etc.) is on the immediate right of Reeborg. Returns: True", "html selector, or a URL (\"link\") to a world defined on some website.", "RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators, this", "self.body._orientation == RUR.NORTH: facing = \"facing North\" elif self.body._orientation == RUR.SOUTH: facing =", "a URL (\"link\") to a world defined on some website. shortname: Optional parameter;", "obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If", "argument, otherwise an exception will be raised. \"\"\" if obj is None: RUR._UR.put_(self.body)", "object is not found, the result is an empty list. Examples: >>> object_here()", "if an wall is on the immediate right of Reeborg. Returns: True if", "Args: bool: True if recording is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots():", "from occurring. This function has a similar effect to clicking the corresponding button", "Code highlighting occurs thanks to some extra code inserted in a user's program", "is executed. If the world is not already present in the html selector,", "wall, False otherwise. 
\"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall", "named color, rgb and rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125,", "#py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates", "if an obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._right_is_clear_() def", "a wall blocks the way. Returns: True if the path blocked by a", "possible: named color, rgb and rgba, and hexadecimal notation. Examples:: >>> reeborg =", "''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of", "at once, if one only looks at the trace. \"\"\" if style not", "def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the trace (oil leak). Args:", "is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model", "Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use", "RUR.SOUTH: facing = \"facing South\" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf':", "list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a Python", "dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a Javascript object.\"\"\" # do not", "version of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return", "of running the program will simply be to change the world - the", "RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. 
Makes it possible to", "in milliseconds) is given, the execution automatically resumes after this time has elapsed.", "should appear near the end. def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached", "indicate an infinite quantity. \"\"\" if tokens is None: robot = RUR.robot.create_robot(x, y,", "result is an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\",", "functions and methods should appear # alphabetically in this English version, with the", "the location in front of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates", "else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self):", "facing = \"facing West\" elif self.body._orientation == RUR.NORTH: facing = \"facing North\" elif", "similar effect to clicking the corresponding button in Reeborg's World. Code highlighting occurs", "RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more than one", "a wall at the location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object", "name \"Hello\" will be shown in the selector instead # of the full", "is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen) or", "of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following def narration(html):", "function has a similar effect to clicking the corresponding button in Reeborg's World.", "else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None): #py:UR.put \"\"\"Puts down an object. If Reeborg", "as a string. Returns: a list of the type of objects carried by", "(top of the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_())", "not be if the program is run a second time. 
\"\"\" RUR._no_highlight_() def", "its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a Javascript", "if an obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def", "a string. Returns: a list of the type of objects carried by Reeborg.", "be if the program is run a second time. \"\"\" RUR._no_highlight_() def object_here(obj=None):", "integer, or the string \"inf\" to indicate an infinite quantity. \"\"\" if tokens", "class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def done(): #py: message", "or if the specified object is not found, the result is an empty", "is facing North (top of the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag():", "#py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to", "use done() for this task.\" raise ReeborgError(message) #---- or ------ try: move() except", "\"e\" or \"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\" or \"south\". tokens:", "be to change the world - the rest of the program will be", "is not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) #", "World(url, shortname=None): #py:World \"\"\"Allow to select a specific world within a program. If", "RUR._no_highlight_() def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects are present at", "function allows to change the default maximum number of instructions executed in a", "and the rest of the program is executed. If the world is not", "carries_object(\"banana\") [] \"\"\" if obj is not None: ans = RUR._carries_object_(obj) else: ans", "color, rgb and rgba, and hexadecimal notation. 
Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0,", "handed turns appear to be done all at once, if one only looks", "object is not found, the result is an empty list. Examples: >>> reeborg", "is an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"]", "if the path blocked by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def", "of the robot. Args: style: \"thick\", \"invisible\" and \"default\" are the three possible", "name of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a program's execution to", "prior to execution. When disabling highlighting using this function, the extra instructions are", "window RUR = window.RUR except: print(\"\\n --> Skipping importing from browser for sphinx.\\n\")", "back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def", "\"\"\"Creates a UsedRobot. Args: x: horizontal coordinate; an integer greater or equal to", "if tokens is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x,", "create custom world menus. See the documentation for more details. \"\"\" RUR._MakeCustomMenu_(content) def", "in a Python program for Reeborg's World. \"\"\" # When generating documentation using", "list. Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\"", "paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If an", "fence, water, etc.) is on the immediate right of Reeborg. Returns: True if", "actions played back. 
\"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\"", "appear to be done all at once, if one only looks at the", "an argument (time in milliseconds) is given, the execution automatically resumes after this", "RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall at the location", "than one type of objects, the type must be specified as an argument,", "of instructions executed in a program (1000) by a different value. \"\"\" RUR._set_max_nb_instructions_(nb)", "def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on the immediate right of", "if the path is clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def", "formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following", "of objects, the type must be specified as an argument, otherwise an exception", "the default maximum number of instructions executed in a program (1000) by a", "else: ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the text", "def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen)", "False otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators.", "The \"thick\" style is centered on the path followed, so that it is", "return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by", "and 3. \"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of", "of the program will be ignored. If the desired world is already selected,", "an exception will be raised. 
\"\"\" if obj is None: RUR._take_() else: RUR._take_(obj)", "obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is no longer supported; use print()", "object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects are present at Reeborg's", "the robot. Args: style: \"thick\", \"invisible\" and \"default\" are the three possible arguments.", "watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a", "effect to clicking the corresponding button in Reeborg's World. Code highlighting occurs thanks", ">>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\")", "position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects are", "will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from", "= \"carries an infinite number of tokens.\" else: carries = 'carries %s tokens'", "bool: True if recording is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots", "robot; its value must be a positive integer, or the string \"inf\" to", "If the desired world is already selected, this command is ignored and the", ">>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be shown in the selector", "and be defined in commands.js and methods should have names of # the", "to clicking the corresponding button in Reeborg's World. Code highlighting occurs thanks to", "turns appear to be done all at once, if one only looks at", "instructions are still present, but they will not be if the program is", "def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True if", "underscore and are considered to be private. 
\"\"\" # do not translate the", "maximum number of instructions executed in a program (1000) by a different value.", "RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used for the robot.", "in this English version, with the exception of Python-specific # functions or classes", "tokens is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y,", "the rest of the program is executed. If the world is not already", ">>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else:", "RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at the location", "put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg carries more than one type", "present at Reeborg's location. Args: obj: optional parameter which is the name of", "one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the images used", "Makes it possible to create custom world menus. See the documentation for more", "\"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None): #py:UR.take", "highlighting from occurring. This function has a similar effect to clicking the corresponding", "\"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the path. Returns: True", "\"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace (oil leak).", "World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world will be added to the", "\"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. 
Examples::", "RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or not.", "if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if", "goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build", "translation needed def put(obj=None): #py:put \"\"\"Puts down an object. If Reeborg carries more", "RUR.EAST: facing = \"facing East\" elif self.body._orientation == RUR.WEST: facing = \"facing West\"", "(not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg", "functions from Javascript used below should have names of the form # RUR._xyz_", "= 'carries %s tokens' % self.body.objects['token'] else: carries = 'carries no tokens' return", "def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators, this function is similar", "for world creators, this function allows to change the default maximum number of", "\"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to build a wall at", "carries = 'carries %s tokens' % self.body.objects['token'] else: carries = 'carries no tokens'", "by a different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world", "a similar effect to clicking the corresponding button in Reeborg's World. Code highlighting", "obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects are present at Reeborg's location.", "#py:UR.wall_on_right \"\"\"Indicates if an wall is on the immediate right of Reeborg. Returns:", "# do not translate the name of this function RUR._dir_js_(obj) def done(): #py:done", "between Reeborg's actions played back. 
\"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to", "UsedRobot. Args: x: horizontal coordinate; an integer greater or equal to 1. y:", "\"\"\"This module contains functions, classes and exceptions that can be included in a", "wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on the immediate right of Reeborg.", "RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take", "Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes", "should appear # alphabetically in this English version, with the exception of Python-specific", "included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world will", "def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows", "True if an obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body)", "will not be if the program is run a second time. \"\"\" RUR._no_highlight_()", "selector, or a URL (\"link\") to a world defined on some website. shortname:", "def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace (oil leak). Args: color", "two possible choices: either a name appearing in the html selector, or a", "#py:print_html \"\"\"Intended primarily for world creators, this function is similar to print() except", "some website. shortname: Optional parameter; if specified, this will be the name shown", "are the three possible arguments. 
\"invisible\" is equivalent to set_trace_color(\"rgba(0, 0, 0, 0)\"),", "but they will not be if the program is run a second time.", ">>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj", "to Reeborg's World. Is raised when Reeborg hits a wall. \"\"\" pass try:", "text previously written using a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return", "RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence,", "attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr)", "#py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on the immediate", "tokens) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y)", "or classes that should appear near the end. def at_goal(): #py:at_goal \"\"\"Indicate if", "\"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn):", "rest of the program is executed. If the world is not already present", "if Reeborg has reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self):", "def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js \"\"\"Lists attributes and", "tranlate the following def narration(html): raise ReeborgError(\"narration is obsolete; use print_html().\") def say():", "self.body.objects['token'] == 'inf': carries = \"carries an infinite number of tokens.\" else: carries", "# of the full url \"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url,", "is not found, the result is an empty list. Examples: >>> reeborg =", "World. Is raised when Reeborg hits a wall. 
\"\"\" pass try: window['WallCollisionError'] =", "carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj:", "window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns", "effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. If more than one type", "an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>>", "Returns: a list of the type of objects found. If no object is", "\"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given", "attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception):", "return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def", "RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on the immediate right", "\"\"\"Indicates if an wall is on the immediate right of Reeborg. Returns: True", "print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following def narration(html): raise ReeborgError(\"narration is", "left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a Javascript function.\"\"\"", "\"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select a specific world within", "to build a wall at the location in front of itself.\"\"\" RUR._build_wall_() def", "return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one grid position.\"\"\" RUR._move_() def", "wall is on Reeborg's right, False otherwise. 
\"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr):", "private. \"\"\" # do not translate the name of this function attrs =", "an integer greater or equal to 1. orientation (string):, one of \"e\" or", "def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError):", "found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall,", "def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects are present at", "the maximum number of robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def", "Reeborg's World. Code highlighting occurs thanks to some extra code inserted in a", "obj: optional parameter which is the name of an object as a string.", "obstacle is on Reeborg's right, False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions", ">>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not None: return", "print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do", "horizontal coordinate; an integer greater or equal to 1. y: vertical coordinate; an", "to change the world - the rest of the program will be ignored.", "\"\"\" RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the trace", "except: pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict containing", "are considered to be private. 
\"\"\" # do not translate the name of", "\"apple\"] >>> object_here(\"token\") [\"token\"] >>> object_here(\"banana\") [] \"\"\" if obj is not None:", "def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version of", "False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the", "of tokens to give to the robot; its value must be a positive", "not translate the name of this function RUR._dir_js_(obj) def done(): #py:done \"\"\"Causes a", ">>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change", "'''Returns a dict containing information about world. ''' import json return json.loads(RUR.control.get_world_map()) def", "right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the model", "location, the type must be specified as an argument, otherwise an exception will", "these modules are both # unavailable and not needed try: from browser import", "they will not be if the program is run a second time. \"\"\"", "Python program for Reeborg's World. \"\"\" # When generating documentation using sphinx, these", "is clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates", "must be specified as an argument, otherwise an exception will be raised. 
\"\"\"", "\"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates", "RUR._UR.set_model_(self.body, model) def set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the trace (oil", "set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this function allows to set the", "is it sets the colour to a completely transparent value. The \"thick\" style", "#py:UR.at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns: True if Reeborg", "if an obstacle (wall, fence, water, etc.) blocks the path. Returns: True if", "selector instead # of the full url \"\"\" if shortname is None: RUR._World_(url)", "\"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall", "RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds a valid Python expression (given as a", "itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object", "RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True", "def __str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing", "front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the path.", "turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the", "convert from JS list-like object to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color)", "program is executed. 
If the world is not already present in the html", "in self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries an infinite number of", "default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj):", "else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay (in milliseconds) between Reeborg's", "is desired, False otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found", "a double underscore and are considered to be private. \"\"\" # do not", "a UsedRobot. Args: x: horizontal coordinate; an integer greater or equal to 1.", "attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE", "#py:set_trace_style \"\"\"Change the trace style of the robot. Args: style: \"thick\", \"invisible\" and", "code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall", "Python expression (given as a string) to the watch list. \"\"\" RUR.add_watch(expr) def", "right, False otherwise. \"\"\" return RUR._right_is_clear_() def set_max_nb_instructions(nb): #py:set_max_nb_instructions \"\"\"Intended primarily for world", "(images) for the robot. Args: model: a number between 0 and 3. \"\"\"", "http://reeborg.ca/my_world will be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name", "RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase all the", "carried by Reeborg. If Reeborg carries no object, or not the specified one,", "#py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the path. 
Returns:", "RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self):", "def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move \"\"\"Move forward, by one grid", "to 1. orientation (string):, one of \"e\" or \"east\", \"w\" or \"west\", \"n\"", "wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on the immediate right of Reeborg.", "Args: obj: optional parameter which is the name of an object as a", "a wall, False otherwise. \"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an", "no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. This function has a similar effect", "its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg to", "of this function attrs = [] for attr in dir(obj): if attr.startswith(\"__\"): continue", "be ignored. If the desired world is already selected, this command is ignored", "if Reeborg has reached the desired location. Returns: True if Reeborg has reached", "to set the maximum number of robots allowed in a given world. \"\"\"", "Args: style: \"thick\", \"invisible\" and \"default\" are the three possible arguments. \"invisible\" is", "\"\"\"Shows the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates", "return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on the immediate", "None: robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens)", "location. Returns: True if Reeborg has reached its goal, False otherwise. \"\"\" return", "= RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body =", "the way. 
Returns: True if the path blocked by a wall, False otherwise.", "#---- or ------ try: move() except ReeborgError: # ignore a collision turn_left() \"\"\"", "#py:new_robot_images \"\"\"Allow to replace the images used for the robot. More details will", "the robot; its value must be a positive integer, or the string \"inf\"", "RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_() def", "def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on", "world menus. See the documentation for more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None):", "highlighting occurs thanks to some extra code inserted in a user's program prior", "name appearing in the html selector, or a URL (\"link\") to a world", "#py:UR.set_trace_color \"\"\"Change the color of the trace (oil leak). Args: color (string): four", "primarily for world creators, this function allows to change the default maximum number", "program will simply be to change the world - the rest of the", "RUR._object_here_() return list(ans) # convert from JS list-like object to proper Python list", "of html input. \"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No translation needed", "Examples: >>> reeborg = UsedRobot() >>> reeborg.carries_object() [\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>>", "primarily for world creators, this function allows to set the maximum number of", "of objects is at Reeborg's location, the type must be specified as an", "will be added. 
Args: url: two possible choices: either a name appearing in", "= UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125, 0, 0, 0.5)\")", "to set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets the colour to a", "below should have names of the form # RUR._xyz_ and be defined in", "leak). Args: color (string): four formats are possible: named color, rgb and rgba,", "a program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle", "Is raised when Reeborg hits a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError", "def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj:", "\"\"\"Lists attributes and methods of a Javascript object.\"\"\" # do not translate the", "RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay (in milliseconds) between", "0, 0, 0.5)\") >>> reeborg.set_trace_color(\"#FF00FF\") \"\"\" RUR._UR.set_trace_color_(self.body, color) def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change", "some extra code inserted in a user's program prior to execution. When disabling", "to some extra code inserted in a user's program prior to execution. When", "type of objects are present at Reeborg's location. Args: obj: optional parameter which", "[\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._UR.set_trace_style_(self.body, style) def take(self, obj=None):", "\"Hello\") # The name \"Hello\" will be shown in the selector instead #", "def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an object or not. 
Args:", "#py:dir_js \"\"\"Lists attributes and methods of a Javascript object.\"\"\" # do not translate", "for world creators, this function is similar to print() except it can make", "[\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body, obj))", "otherwise. \"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. Makes", "name start with a double underscore and are considered to be private. \"\"\"", "the location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg", "in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color): #py:set_trace_color \"\"\"Change the color of", "the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate the following def narration(html): raise", "at the location in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether", "to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a", "\"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts)", "#py:view_source_js \"\"\"Shows the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front", "value. The \"thick\" style is centered on the path followed, so that it", "for use by educators. Makes it possible to create custom world menus. See", "or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. 
If more", "robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body", "list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall,", "tokens.\" else: carries = 'carries %s tokens' % self.body.objects['token'] else: carries = 'carries", "is present, or if the specified object is not found, the result is", "#py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback). If an argument", "water, etc.) is on the immediate right of Reeborg. Returns: True if an", "\"\"\"Select the model (images) for the robot. Args: model: a number between 0", "\"\"\"Prevents code highlighting from occurring. This function has a similar effect to clicking", "Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>> reeborg.set_trace_color(\"rgb(125, 0, 0)\") >>> reeborg.set_trace_color(\"rgba(125,", "not use done() for this task.\" raise ReeborgError(message) #---- or ------ try: move()", "details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select a specific world", "obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def right_is_clear(self): #py:UR.right_is_clear \"\"\"Indicates if an", "shown in the html selector. Examples: >>> World(\"Home 1\") # world included by", "\"facing East\" elif self.body._orientation == RUR.WEST: facing = \"facing West\" elif self.body._orientation ==", "If the world is not already present in the html selector, it will", "the desired world is already selected, this command is ignored and the rest", "highlighting using this function, the extra instructions are still present, but they will", "selector. 
Examples: >>> World(\"Home 1\") # world included by default >>> World(\"http://reeborg.ca/my_world\") #", "URL (\"link\") to a world defined on some website. shortname: Optional parameter; if", "None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e',", "carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is", "as a string. Returns: a list of the type of objects found. If", "take(self, obj=None): #py:UR.take \"\"\"Takes an object. If more than one type of objects", "+= \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\" ).replace(\">\", \"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions", "obj is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear", "object_here(\"banana\") [] \"\"\" if obj is not None: ans = RUR._object_here_(obj) else: ans", "#py:UR.right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) is on the immediate", "reached its goal, False otherwise. \"\"\" return RUR._UR.at_goal_(self.body) def build_wall(self): #py:UR.build_wall \"\"\"Instructs Reeborg", "documentation for more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select", "if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\" attrs.append(attr) print_html(str(\"\\n\".join(attrs)).replace(\"&\", \"&amp\").replace(\"<\", \"&lt;\"", "blocks the way. Returns: True if the path blocked by a wall, False", "appear near the end. 
def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the", "style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\") RUR._set_trace_style_(style) def", "type of objects carried by Reeborg. If Reeborg carries no object, or not", "\"\"\"Indicates whether any type of objects are present at Reeborg's location. Args: obj:", "or starts recording changes occuring in the world. Args: bool: True if recording", "simply be to change the world - the rest of the program will", "RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version of the default robot.\"\"\" class", "\"facing South\" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries", "it sets the colour to a completely transparent value. The \"thick\" style is", "different value. \"\"\" RUR._set_max_nb_instructions_(nb) def set_max_nb_robots(nb): #py:set_max_nb_robots \"\"\"Intended primarily for world creators, this", "\"carries an infinite number of tokens.\" else: carries = 'carries %s tokens' %", "else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts recording changes occuring in the", "RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, tokens) self.body = robot", "centered on the path followed, so that it is impossible to distinguish between", "will be the name shown in the html selector. 
Examples: >>> World(\"Home 1\")", "version, with the exception of Python-specific # functions or classes that should appear", "\"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an", "to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water,", "right, and right handed turns appear to be done all at once, if", "is not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def", "list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None): #py:pause \"\"\"Pauses a program's execution (playback).", "is impossible to distinguish between motion to the left or to the right,", "elapsed. \"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html", "in set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def", "program's execution (playback). If an argument (time in milliseconds) is given, the execution", "Reeborg. Returns: True if an obstacle is on Reeborg's right, False otherwise. \"\"\"", "greater or equal to 1. orientation (string):, one of \"e\" or \"east\", \"w\"", "valid Python expression (given as a string) to the watch list. \"\"\" RUR.add_watch(expr)", "will be raised. 
\"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def", "\"\"\"Move forward, by one grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether", "\"\"\" # When generating documentation using sphinx, these modules are both # unavailable", "set_trace_style().\") RUR._set_trace_style_(style) def sound(bool): #py:sound \"\"\"Activate or deactivate sound effects.\"\"\" RUR._sound_(bool) def take(obj=None):", "RUR._xyz_ and be defined in commands.js and methods should have names of #", "if self.body.objects['token'] == 'inf': carries = \"carries an infinite number of tokens.\" else:", "\"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if", "attributes and methods of a Python object, excluding those whose name start with", "obj=None): #py:UR.take \"\"\"Takes an object. If more than one type of objects is", "call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns", "already selected, this command is ignored and the rest of the program is", "def print_world_map(self): #py:SI.print_world_map '''Prints a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete #", "\"\"\" if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style in set_trace_style().\")", "UsedRobot(object): #py:UR def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__ \"\"\"Creates a UsedRobot. Args:", "on Reeborg's right, False otherwise. 
\"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific def add_watch(expr): #py:add_watch \"\"\"Adds", "with the exception of Python-specific # functions or classes that should appear near", "number of instructions executed in a program (1000) by a different value. \"\"\"", "or a URL (\"link\") to a world defined on some website. shortname: Optional", "infinite number of tokens.\" else: carries = 'carries %s tokens' % self.body.objects['token'] else:", "def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks the", "Reeborg's actions played back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its", "in Reeborg's World. Code highlighting occurs thanks to some extra code inserted in", "exception will be raised. \"\"\" if obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj)", "list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"]", "on Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select", "or the string \"inf\" to indicate an infinite quantity. \"\"\" if tokens is", "soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. This function", "sound effects.\"\"\" RUR._sound_(bool) def take(obj=None): #py:take \"\"\"Takes an object. If more than one", "be defined in commands.js and methods should have names of # the form", "ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended primarily for", "RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.) blocks", "the model (images) for the robot. 
Args: model: a number between 0 and", "RUR._turn_left_() def view_source_js(fn): #py:view_source_js \"\"\"Shows the source code of a Javascript function.\"\"\" RUR._view_source_js_(fn)", "raise ReeborgError(message) #---- or ------ try: move() except ReeborgError: # ignore a collision", "None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to its left.\"\"\"", "Reeborg. If Reeborg carries no object, or not the specified one, the result", "forward, by one grid position.\"\"\" RUR._move_() def new_robot_images(images): #py:new_robot_images \"\"\"Allow to replace the", "\"\"\" return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall at", "for attr in dir(obj): if attr.startswith(\"__\"): continue if callable(getattr(obj, attr)): attr += \"()\"", "world - the rest of the program will be ignored. If the desired", "end. def at_goal(): #py:at_goal \"\"\"Indicate if Reeborg has reached the desired location. Returns:", "[] \"\"\" if obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body))", "of \"e\" or \"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\" or \"south\".", "module contains functions, classes and exceptions that can be included in a Python", "colour to a completely transparent value. The \"thick\" style is centered on the", "played back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_()", "#py:set_trace_color \"\"\"Change the color of the trace (oil leak). Args: color (string): four", "an object as a string. 
Returns: a list of the type of objects", "done(): #py: message = \"You can not use done() for this task.\" raise", "#py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North (top of the screen) or not.\"\"\"", "if self.body._orientation == RUR.EAST: facing = \"facing East\" elif self.body._orientation == RUR.WEST: facing", "or \"south\". tokens: Initial number of tokens to give to the robot; its", "if obj is None: RUR._put_() else: RUR._put_(obj) def recording(bool): #py:recording \"\"\"Stops or starts", "obj is not None: return list(RUR._UR.object_here_(self.body, obj)) else: return list(RUR._UR.object_here_(self.body)) def put(self, obj=None):", "one selected by using this function, the result of running the program will", "return RUR._at_goal_() def build_wall(): #py:build_wall \"\"\"Instructs Reeborg to build a wall at the", "Reeborg's location, the type must be specified as an argument, otherwise an exception", "If Reeborg carries more than one type of objects, the type must be", "the program will simply be to change the world - the rest of", "full url \"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object):", "\"UsedRobot at {} {} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg", "water, etc.) blocks the path. Returns: True if the path is clear (not", "\"\"\" return RUR._wall_in_front_() def wall_on_right(): #py:wall_on_right \"\"\"Indicates if an wall is on the", "otherwise. \"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the world.\"\"\"", "not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def clear_print():", "def object_here(obj=None): #py:object_here \"\"\"Indicates whether any type of objects are present at Reeborg's", "\"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting from occurring. 
This function has", "occuring in the world. Args: bool: True if recording is desired, False otherwise.", "included in a Python program for Reeborg's World. \"\"\" # When generating documentation", "window['print_html'] = print_html # No translation needed def put(obj=None): #py:put \"\"\"Puts down an", "blocks the path. Returns: True if the path is clear (not blocked), False", "Reeborg. Returns: True if a wall is on Reeborg's right, False otherwise. \"\"\"", "# ignore a collision turn_left() \"\"\" def __init__(self, message): #py:RE.__init__ self.reeborg_shouts = message", "#py:obsolete # Do not tranlate the following def narration(html): raise ReeborgError(\"narration is obsolete;", "None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert from", "__init__(self, message): #py:RE.__init__ self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError']", "Reeborg's World. \"\"\" # When generating documentation using sphinx, these modules are both", "this function, the result of running the program will simply be to change", "\"You can not use done() for this task.\" raise ReeborgError(message) #---- or ------", "world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world", "\"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and methods of a Python object,", "a positive integer, or the string \"inf\" to indicate an infinite quantity. \"\"\"", "set_trace_color(self, color): #py:UR.set_trace_color \"\"\"Change the color of the trace (oil leak). Args: color", "#py:carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args: obj: optional parameter", "exception will be raised. 
\"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj)", "If an argument (time in milliseconds) is given, the execution automatically resumes after", "When disabling highlighting using this function, the extra instructions are still present, but", "def dir_js(obj): #py:dir_js \"\"\"Lists attributes and methods of a Javascript object.\"\"\" # do", "using this function, the extra instructions are still present, but they will not", "#py:build_wall \"\"\"Instructs Reeborg to build a wall at the location in front of", "------ try: move() except ReeborgError: # ignore a collision turn_left() \"\"\" def __init__(self,", "MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. Makes it possible to create custom", "facing = \"facing South\" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries", "# When generating documentation using sphinx, these modules are both # unavailable and", "the html selector, it will be added. Args: url: two possible choices: either", "JS list-like object to proper Python list def paint_square(color): #py:paint_square RUR._paint_square_(color) def pause(ms=None):", "or equal to 1. y: vertical coordinate; an integer greater or equal to", "screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one grid", "\"\"\" RUR._recording_(bool) def remove_robots(): #py:remove_robots \"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_()", "def take(obj=None): #py:take \"\"\"Takes an object. If more than one type of objects", "Returns: True if the path blocked by a wall, False otherwise. \"\"\" return", "allows to set the maximum number of robots allowed in a given world.", "True if the path is clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_()", "if a wall is on Reeborg's right, False otherwise. 
\"\"\" return RUR._UR.wall_on_right_(self.body) #py:python_specific", "class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict containing information about", "maximum number of robots allowed in a given world. \"\"\" RUR._set_max_nb_robots_(nb) def set_trace_color(color):", "obstacle (wall, fence, water, etc.) is on the immediate right of Reeborg. Returns:", "name shown in the html selector. Examples: >>> World(\"Home 1\") # world included", "of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object or", "Returns: True if a wall is on Reeborg's right, False otherwise. \"\"\" return", "set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0, 0, 0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def", "SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map '''Returns a dict containing information about world.", "use of html input. \"\"\" RUR._print_html_(html, append) window['print_html'] = print_html # No translation", "= ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific to Reeborg's World. Is", "[] \"\"\" if obj is not None: ans = RUR._carries_object_(obj) else: ans =", "form RUR._UR.xyz_; functions and methods should appear # alphabetically in this English version,", "objects are present at Reeborg's location. Args: obj: optional parameter which is the", "otherwise an exception will be raised. \"\"\" if obj is None: RUR._put_() else:", "exceptions that can be included in a Python program for Reeborg's World. \"\"\"", "rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125,", "raised when Reeborg hits a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError except:", "the trace. 
\"\"\" if style not in [\"thick\", \"default\", \"invisible\"]: raise ReeborgError(\"Unrecognized style", "is obsolete; use print_html().\") def say(): raise ReeborgError(\"say() is no longer supported; use", "#py:done \"\"\"Causes a program's execution to end.\"\"\" RUR._done_() def front_is_clear(): #py:front_is_clear \"\"\"Indicates if", "grid position.\"\"\" RUR._UR.move_(self.body) def object_here(self, obj=None): #py:UR.object_here \"\"\"Indicates whether any type of objects", "y: vertical coordinate; an integer greater or equal to 1. orientation (string):, one", "== 'inf': carries = \"carries an infinite number of tokens.\" else: carries =", "etc.) is on the immediate right of Reeborg. Returns: True if an obstacle", "\"\"\" if ms is None: RUR._pause_() else: RUR._pause_(ms) def print_html(html, append=False): #py:print_html \"\"\"Intended", "self.reeborg_shouts = message def __str__(self): #py:RE.__str__ return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except:", "\"inf\" to indicate an infinite quantity. \"\"\" if tokens is None: robot =", "obj is not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans)", "return repr(self.reeborg_shouts) try: window['ReeborgError'] = ReeborgError except: pass class WallCollisionError(ReeborgError): #py:WCE \"\"\"Exceptions specific", "Examples:: def done(): #py: message = \"You can not use done() for this", "is on the immediate right of Reeborg. Returns: True if a wall is", "otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on", "True if a wall is on Reeborg's right, False otherwise. \"\"\" return RUR._wall_on_right_()", "by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example # the name http://reeborg.ca/my_world will be", "an object or not. 
Args: obj: optional parameter which is the name of", "\"facing West\" elif self.body._orientation == RUR.NORTH: facing = \"facing North\" elif self.body._orientation ==", "or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move \"\"\"Move forward, by one grid position.\"\"\"", "details will be provided soon. \"\"\" RUR._new_robot_images_(images) def no_highlight(): #py:no_highlight \"\"\"Prevents code highlighting", "objects carried by Reeborg. If Reeborg carries no object, or not the specified", "clicking the corresponding button in Reeborg's World. Code highlighting occurs thanks to some", "not needed try: from browser import window RUR = window.RUR except: print(\"\\n -->", "[\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is not", "def set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for the robot. Args: model:", "\"&gt;\").replace(\"\\n\", \"<br>\")) class ReeborgError(Exception): #py:RE \"\"\"Exceptions specific to Reeborg's World. Examples:: def done():", "self.body._orientation == RUR.SOUTH: facing = \"facing South\" if 'token' in self.body.objects: if self.body.objects['token']", "World(\"Home 1\") # world included by default >>> World(\"http://reeborg.ca/my_world\") # fictitious example #", "of itself. \"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg carries an", "orientation (string):, one of \"e\" or \"east\", \"w\" or \"west\", \"n\" or \"north\",", "of the type of objects found. If no object is present, or if", "at Reeborg's location, the type must be specified as an argument, otherwise an", "generating documentation using sphinx, these modules are both # unavailable and not needed", "set_trace_color(color): #py:set_trace_color \"\"\"Change the color of the trace (oil leak). 
Args: color (string):", "creators, this function is similar to print() except it can make use of", "world currently shown is different than the one selected by using this function,", "to Reeborg's World. Examples:: def done(): #py: message = \"You can not use", "are possible: named color, rgb and rgba, and hexadecimal notation. Examples:: >>> set_trace_color(\"red\")", "button in Reeborg's World. Code highlighting occurs thanks to some extra code inserted", "print_html(html, append=False): #py:print_html \"\"\"Intended primarily for world creators, this function is similar to", "and hexadecimal notation. Examples:: >>> set_trace_color(\"red\") >>> set_trace_color(\"rgb(125, 0, 0)\") >>> set_trace_color(\"rgba(125, 0,", "string. Returns: a list of the type of objects found. If no object", "have names of # the form RUR._UR.xyz_; functions and methods should appear #", "\"\"\" if shortname is None: RUR._World_(url) else: RUR._World_(url, shortname) class UsedRobot(object): #py:UR def", "defined on some website. shortname: Optional parameter; if specified, this will be the", "RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg turns to its left.\"\"\" RUR._turn_left_() def view_source_js(fn): #py:view_source_js", "\"\"\" # do not translate the name of this function attrs = []", "the result is an empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\")", "the type must be specified as an argument, otherwise an exception will be", "html selector. 
Examples: >>> World(\"Home 1\") # world included by default >>> World(\"http://reeborg.ca/my_world\")", "be a positive integer, or the string \"inf\" to indicate an infinite quantity.", "functions, classes and exceptions that can be included in a Python program for", "and methods should have names of # the form RUR._UR.xyz_; functions and methods", "elif self.body._orientation == RUR.NORTH: facing = \"facing North\" elif self.body._orientation == RUR.SOUTH: facing", "if a wall blocks the way. Returns: True if the path blocked by", "None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set a time delay (in milliseconds)", "\"\"\"Remove all robots found in the world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if", "screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return dict(RUR._in_the_bag_()) def move(): #py:move", "equal to 1. orientation (string):, one of \"e\" or \"east\", \"w\" or \"west\",", "considered to be private. \"\"\" # do not translate the name of this", "be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will", "\"\"\" return RUR._wall_on_right_() def MakeCustomMenu(content): #py:MakeCustomMenu \"\"\"Designed for use by educators. Makes it", "function.\"\"\" RUR._view_source_js_(fn) def wall_in_front(): #py:wall_in_front \"\"\"Indicates if a wall blocks the way. Returns:", "append) window['print_html'] = print_html # No translation needed def put(obj=None): #py:put \"\"\"Puts down", "Reeborg's right, False otherwise. \"\"\" return RUR._UR.right_is_clear_(self.body) def set_model(self, model): #py:UR.set_model \"\"\"Select the", "menus. See the documentation for more details. 
\"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World", "= \"facing South\" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries =", "\"Hello\" will be shown in the selector instead # of the full url", "append=False): #py:print_html \"\"\"Intended primarily for world creators, this function is similar to print()", "trace style of the robot. Args: style: \"thick\", \"invisible\" and \"default\" are the", "= \"facing East\" elif self.body._orientation == RUR.WEST: facing = \"facing West\" elif self.body._orientation", "are present at Reeborg's location. Args: obj: optional parameter which is the name", "RUR.NORTH: facing = \"facing North\" elif self.body._orientation == RUR.SOUTH: facing = \"facing South\"", "the html selector. Examples: >>> World(\"Home 1\") # world included by default >>>", "instead # of the full url \"\"\" if shortname is None: RUR._World_(url) else:", "from Javascript used below should have names of the form # RUR._xyz_ and", "hits a wall. \"\"\" pass try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo():", "integer greater or equal to 1. y: vertical coordinate; an integer greater or", "followed, so that it is impossible to distinguish between motion to the left", "program will be ignored. If the desired world is already selected, this command", "RUR.WEST: facing = \"facing West\" elif self.body._orientation == RUR.NORTH: facing = \"facing North\"", "clear (not blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if", "path. Returns: True if the path is clear (not blocked), False otherwise. \"\"\"", "string) to the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes and", "positive integer, or the string \"inf\" to indicate an infinite quantity. \"\"\" if", "object as a string. 
Returns: a list of the type of objects carried", "a list of the type of objects found. If no object is present,", "for more details. \"\"\" RUR._MakeCustomMenu_(content) def World(url, shortname=None): #py:World \"\"\"Allow to select a", "\"\"\"Stops or starts recording changes occuring in the world. Args: bool: True if", "#py:take \"\"\"Takes an object. If more than one type of objects is at", "World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\" will be shown in the selector instead", "South\" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries = \"carries an", "color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a recreated version of the", "try: from browser import window RUR = window.RUR except: print(\"\\n --> Skipping importing", "can be included in a Python program for Reeborg's World. \"\"\" # When", "Returns: True if the path is clear (not blocked), False otherwise. \"\"\" return", "a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an", "a string) to the watch list. \"\"\" RUR.add_watch(expr) def dir_py(obj): #py:dir_py \"\"\"Lists attributes", "replace the images used for the robot. More details will be provided soon.", "of the program is executed. If the world is not already present in", "otherwise an exception will be raised. \"\"\" if obj is None: RUR._UR.take_(self.body) else:", "impossible to distinguish between motion to the left or to the right, and", "will be raised. \"\"\" if obj is None: RUR._UR.put_(self.body) else: RUR._UR.put_(self.body, obj) def", ">>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\")", "def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True if", "in front of itself. 
\"\"\" RUR._UR.build_wall_(self.body) def carries_object(self, obj=''): #py:UR.carries_object \"\"\"Indicates whether Reeborg", "one of \"e\" or \"east\", \"w\" or \"west\", \"n\" or \"north\", \"s\" or", "ans = RUR._object_here_() return list(ans) # convert from JS list-like object to proper", "obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates if an obstacle (wall, fence,", "set_trace_color(\"rgba(0, 0, 0, 0)\"), that is it sets the colour to a completely", "containing information about world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints", "world creators, this function allows to change the default maximum number of instructions", "facing North (top of the screen) or not.\"\"\" return RUR._UR.is_facing_north_(self.body) def move(self): #py:UR.move", "0.5)\") >>> set_trace_color(\"#FF00FF\") \"\"\" RUR._set_trace_color_(color) def set_trace_style(style=\"default\"): #py:set_trace_style \"\"\"Change the trace style of", "has a similar effect to clicking the corresponding button in Reeborg's World. Code", "world.\"\"\" RUR._remove_robots_() def right_is_clear(): #py:right_is_clear \"\"\"Indicates if an obstacle (wall, fence, water, etc.)", "def set_trace_style(self, style): #py:UR.set_trace_style \"\"\"Change the trace style of the robot. Args: style:", "otherwise an exception will be raised. \"\"\" if obj is None: RUR._take_() else:", "present, but they will not be if the program is run a second", "is an empty list. Examples: >>> reeborg = UsedRobot() >>> reeborg.object_here() [\"token\", \"apple\"]", "North (top of the screen) or not.\"\"\" return RUR._is_facing_north_() def in_the_bag(): #py:in_the_bag return", "carries_object(\"token\") [\"token\"] >>> carries_object(\"banana\") [] \"\"\" if obj is not None: ans =", "double underscore and are considered to be private. 
\"\"\" # do not translate", "__str__(self): #py:UR.__str__ location = \"({}, {})\".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing =", "and right handed turns appear to be done all at once, if one", "== RUR.EAST: facing = \"facing East\" elif self.body._orientation == RUR.WEST: facing = \"facing", "#py:UR.wall_in_front \"\"\"Indicates if a wall blocks the way. Returns: True if the path", "Initial number of tokens to give to the robot; its value must be", "blocked), False otherwise. \"\"\" return RUR._front_is_clear_() def is_facing_north(): #py:is_facing_north \"\"\"Indicates if Reeborg is", "an infinite number of tokens.\" else: carries = 'carries %s tokens' % self.body.objects['token']", "'''Prints a formatted copy of the world''' print(RUR.control.get_world_map()) #py:obsolete # Do not tranlate", "named color, rgb and rgba, and hexadecimal notation. Examples:: >>> reeborg = UsedRobot()", "using this function, the result of running the program will simply be to", "and rgba, and hexadecimal notation. Examples:: >>> reeborg = UsedRobot() >>> reeborg.set_trace_color(\"red\") >>>", "be the name shown in the html selector. Examples: >>> World(\"Home 1\") #", "{} {}.\".format(location, facing, carries) def at_goal(self): #py:UR.at_goal \"\"\"Indicate if Reeborg has reached the", "objects is at Reeborg's location, the type must be specified as an argument,", "is not None: return list(RUR._UR.carries_object_(self.body, obj)) else: return list(RUR._UR.carries_object_(self.body)) def front_is_clear(self): #py:UR.front_is_clear \"\"\"Indicates", "0, 0)\"), that is it sets the colour to a completely transparent value.", "the name of an object as a string. Returns: a list of the", "an infinite quantity. 
\"\"\" if tokens is None: robot = RUR.robot.create_robot(x, y, orientation)", "= window.RUR except: print(\"\\n --> Skipping importing from browser for sphinx.\\n\") # All", "written using a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def", "than one type of objects is at Reeborg's location, the type must be", "in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self): #py:UR.is_facing_north \"\"\"Indicates if Reeborg is facing North", "to indicate an infinite quantity. \"\"\" if tokens is None: robot = RUR.robot.create_robot(x,", "\"\"\"Exceptions specific to Reeborg's World. Examples:: def done(): #py: message = \"You can", "the right, and right handed turns appear to be done all at once,", "(oil leak). Args: color (string): four formats are possible: named color, rgb and", "or not the specified one, the result is an empty list. Examples: >>>", "similar to print() except it can make use of html input. \"\"\" RUR._print_html_(html,", "[\"token\", \"apple\"] >>> reeborg.carries_object(\"token\") [\"token\"] >>> reeborg.carries_object(\"banana\") [] \"\"\" if obj is not", "a Python object, excluding those whose name start with a double underscore and", "(in milliseconds) between Reeborg's actions played back. \"\"\" RUR._think_(ms) def turn_left(): #py:turn_left \"\"\"Reeborg", "turns to its left.\"\"\" RUR._UR.turn_left_(self.body) def wall_in_front(self): #py:UR.wall_in_front \"\"\"Indicates if a wall blocks", "set_model(self, model): #py:UR.set_model \"\"\"Select the model (images) for the robot. Args: model: a", "RUR._UR.wall_in_front_(self.body) def wall_on_right(self): #py:UR.wall_on_right \"\"\"Indicates if an wall is on the immediate right", "#py:MakeCustomMenu \"\"\"Designed for use by educators. Makes it possible to create custom world", "infinite quantity. 
\"\"\" if tokens is None: robot = RUR.robot.create_robot(x, y, orientation) else:", "of objects found. If no object is present, or if the specified object", "for world creators, this function allows to set the maximum number of robots", "\"\"\"Returns a recreated version of the default robot.\"\"\" class Robot(UsedRobot): def __init__(self): self.body", "not the specified one, the result is an empty list. Examples: >>> reeborg", "the path blocked by a wall, False otherwise. \"\"\" return RUR._UR.wall_in_front_(self.body) def wall_on_right(self):", "wall blocks the way. Returns: True if the path blocked by a wall,", "obj is None: RUR._UR.take_(self.body) else: RUR._UR.take_(self.body, obj) def turn_left(self): #py:UR.turn_left \"\"\"Reeborg turns to", "is an empty list. Examples: >>> object_here() [\"token\", \"apple\"] >>> object_here(\"token\") [\"token\"] >>>", "blocked), False otherwise. \"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body)) def is_facing_north(self):", "information about world. ''' import json return json.loads(RUR.control.get_world_map()) def print_world_map(self): #py:SI.print_world_map '''Prints a", "in front of itself.\"\"\" RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an", "try: window['WallCollisionError'] = WallCollisionError except: pass class SatelliteInfo(): #py:SI @property def world_map(self): #py:SI.world_map", "code inserted in a user's program prior to execution. When disabling highlighting using", "using a call to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot():", "RUR._build_wall_() def carries_object(obj=None): #py:carries_object \"\"\"Indicates whether Reeborg carries an object or not. Args:", "clear (not blocked), False otherwise. 
\"\"\" return RUR._UR.front_is_clear_(self.body) def in_the_bag(self): #py:UR.in_the_bag return dict(RUR._UR.in_the_bag_(self.body))", "raised. \"\"\" if obj is None: RUR._take_() else: RUR._take_(obj) def think(ms): #py:think \"\"\"Set", "exception of Python-specific # functions or classes that should appear near the end.", "specified, this will be the name shown in the html selector. Examples: >>>", "Javascript object.\"\"\" # do not translate the name of this function RUR._dir_js_(obj) def", "print() except it can make use of html input. \"\"\" RUR._print_html_(html, append) window['print_html']", "ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def clear_print(): #py:clear_print \"\"\"Erase", "obj is not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans)", "will be added to the selector >>> World(\"http://reeborg.ca/my_world\", \"Hello\") # The name \"Hello\"", "completely transparent value. The \"thick\" style is centered on the path followed, so", "result is an empty list. Examples: >>> carries_object() [\"token\", \"apple\"] >>> carries_object(\"token\") [\"token\"]", "\"apple\"] >>> reeborg.object_here(\"token\") [\"token\"] >>> reeborg.object_here(\"banana\") [] \"\"\" if obj is not None:", "to print().\"\"\" RUR._clear_print_() def color_here(): #py:color_here return RUR._color_here_() def default_robot(): #py:default_robot \"\"\"Returns a", "the path. Returns: True if the path is clear (not blocked), False otherwise.", "formats are possible: named color, rgb and rgba, and hexadecimal notation. Examples:: >>>" ]
[]
[ "'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and", "not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return", "class Solution: def XXX(self, root: 'TreeNode') -> 'bool': if not root: return True", "root: 'TreeNode') -> 'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了", "'TreeNode') -> 'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return", "def XXX(self, root: 'TreeNode') -> 'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1:", "这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p): if not p:", "self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p): if not p: return 0", "if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right)", "abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p): if", "True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self,", "-> 'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left)", "# 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p): if not", "and self.XXX(root.right) return False def maxDepth(self, p): if not p: return 0 else:", "return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p): if not p: return", "Solution: def XXX(self, root: 'TreeNode') -> 'bool': if not root: 
return True if", "if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def maxDepth(self, p):", "root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False", "XXX(self, root: 'TreeNode') -> 'bool': if not root: return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: #", "self.XXX(root.right) return False def maxDepth(self, p): if not p: return 0 else: return", "return False def maxDepth(self, p): if not p: return 0 else: return max(self.maxDepth(p.left),self.maxDepth(p.right))+1", "return True if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1: # 这一点原本想错了 return self.XXX(root.left) and self.XXX(root.right) return False def" ]
[ "def size(self): return len(self.queue) def pop(self): if len(self.queue) <=0: return \"No element in", "class Queue: def __init__(self): self.queue = [] def insert(self, data): if data is", "insert(self, data): if data is not None: self.queue.insert(0,data) return True return False def", "size(self): return len(self.queue) def pop(self): if len(self.queue) <=0: return \"No element in the", "= [] def insert(self, data): if data is not None: self.queue.insert(0,data) return True", "return len(self.queue) def pop(self): if len(self.queue) <=0: return \"No element in the Queue!\"", "<reponame>sujeek/python_base class Queue: def __init__(self): self.queue = [] def insert(self, data): if data", "data): if data is not None: self.queue.insert(0,data) return True return False def size(self):", "True return False def size(self): return len(self.queue) def pop(self): if len(self.queue) <=0: return", "def insert(self, data): if data is not None: self.queue.insert(0,data) return True return False", "def __init__(self): self.queue = [] def insert(self, data): if data is not None:", "Queue: def __init__(self): self.queue = [] def insert(self, data): if data is not", "None: self.queue.insert(0,data) return True return False def size(self): return len(self.queue) def pop(self): if", "[] def insert(self, data): if data is not None: self.queue.insert(0,data) return True return", "__init__(self): self.queue = [] def insert(self, data): if data is not None: self.queue.insert(0,data)", "False def size(self): return len(self.queue) def pop(self): if len(self.queue) <=0: return \"No element", "is not None: self.queue.insert(0,data) return True return False def size(self): return len(self.queue) def", "if data is not None: self.queue.insert(0,data) return True return False def size(self): return", "return False def size(self): return len(self.queue) def pop(self): if len(self.queue) <=0: return \"No", "len(self.queue) def pop(self): if len(self.queue) <=0: return \"No element in the 
Queue!\" return", "self.queue = [] def insert(self, data): if data is not None: self.queue.insert(0,data) return", "return True return False def size(self): return len(self.queue) def pop(self): if len(self.queue) <=0:", "def pop(self): if len(self.queue) <=0: return \"No element in the Queue!\" return self.queue.pop()", "self.queue.insert(0,data) return True return False def size(self): return len(self.queue) def pop(self): if len(self.queue)", "data is not None: self.queue.insert(0,data) return True return False def size(self): return len(self.queue)", "not None: self.queue.insert(0,data) return True return False def size(self): return len(self.queue) def pop(self):" ]
[ "duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", "\"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str =", "source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)],", "(\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original =", "import cfg.glob import db.cls_db_core import db.cls_run import pytest import utils import dcr #", "target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\"", "pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [],", "[], [ stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=( [], [ stem_name_2", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test 
RUN_ACTION_PROCESS_INBOX - rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt,", "duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir(", "str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir(", "------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1", "french. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path", "# ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])", "directory '\" + language_directory_name + \"' after processing missing\" ) assert 0 ==", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=(", "utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # -------------------------------------------------------------------------", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # -------------------------------------------------------------------------", "\"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], 
[", "( \"language directory '\" + language_directory_name + \"' after processing missing\" ) assert", "import os.path import pathlib import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import", "= os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file shutil.copy(", "rejected - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", ") os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" +", "stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========>", "shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- #", "inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # -------------------------------------------------------------------------", "cfg.glob.db_core.create_database() # 
------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # -------------------------------------------------------------------------", "subdirectory # TBD # ------------------------------------------------------------------------- # Test not language English in document #", "+ language_directory_name + \"' after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), (", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1:", "disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. 
#", "file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted )", "\"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name", "import pathlib import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import", "+ \" files still found after processing\" ) # ------------------------------------------------------------------------- # Check empty", "\"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"),", "values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=(", "\"base directory '\" + base_directory + \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)),", "test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ),", ") # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ],", "# 
----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX -", "pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"),", "[], ), inbox_accepted=( [], [], ), inbox_rejected=( [], [ \"unknown_file_extension_1.xxx\", \"unknown_file_extension_protected_2.xxx\", ], ),", "# ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [", "cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import pytest import utils import dcr", "duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ),", "source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected )", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. # -----------------------------------------------------------------------------", "RUN_ACTION_PROCESS_INBOX - rejected - 901. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", ") # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [],", ") assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found after", "pytest import utils import dcr # ----------------------------------------------------------------------------- # Constants & Globals. # -----------------------------------------------------------------------------", "), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. #", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ],", "pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=( [],", "# @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. 
# -----------------------------------------------------------------------------", "# ------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original", "pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ],", "------------------------------------------------------------------------- # Test not language English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END)", "inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=( [], [", "+ file_ext, ], ), inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext, ],", "pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, )", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=(", "- rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"),", "target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], 
target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected,", "accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\"", "[ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # -------------------------------------------------------------------------", "os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name),", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", "after processing\" ) # ------------------------------------------------------------------------- # Check empty language subdirectory # TBD #", "Module pp.inbox.\"\"\" import os.path import pathlib import shutil import cfg.cls_setup import cfg.glob import", "\"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # -----------------------------------------------------------------------------", "\"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french", "test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [], ), inbox_rejected=( [],", "stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox )", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected", ") # ------------------------------------------------------------------------- values_original = 
pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0,", ") # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901.", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[", "subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE,", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. 
# -----------------------------------------------------------------------------", "- rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"),", ") stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1", "\"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX", "dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 +", "\"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\")))", "RUN_ACTION_PROCESS_INBOX - rejected. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", "import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import pytest import utils import", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "- 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\"", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1:", "Test RUN_ACTION_PROCESS_INBOX - rejected - 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "# Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ],", "rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\",", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. # ----------------------------------------------------------------------------- def", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "# ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ],", "db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [],", "(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========>", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test", "Check empty language subdirectory # TBD # ------------------------------------------------------------------------- # Test not language English", "[ stem_name_2 + \".\" + file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) #", 
"inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory", "test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # -------------------------------------------------------------------------", "missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found", "import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import pytest import", "[ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) #", "# Test not language English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) #", "stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) #", "Globals. # ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox):", "rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\",", "# Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", "initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), )", "# ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1,", "\"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # -------------------------------------------------------------------------", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted)", 
"----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) #", "- rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "\"' after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \"", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) #", "'\" + base_directory + \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language", "target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], )", "os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext),", ") dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( 
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes(", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\",", ") dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes(", "English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX", "- duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "\".\" + file_ext, ], ), inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext,", "test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\"", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\",", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir(", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1,", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox):", "source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original", "file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX -", "----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected)", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [], ), inbox_rejected=(", "- rejected - duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\"", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext:", "\"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX -", "utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), )", "= \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str", "shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import pytest import utils", "dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=(", "), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. 
#", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), )", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. # ----------------------------------------------------------------------------- def", "(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========>", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):", "file_ext, ], ), inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext, ], ),", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox):", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901. 
# ----------------------------------------------------------------------------- def", "rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\"", "( \"base directory '\" + base_directory + \"' after processing missing\" ) assert", "str(len(os.listdir(language_directory_name))) + \" files still found after processing\" ) # ------------------------------------------------------------------------- # Check", "Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore", "source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2", "+ \"' after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) +", "source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ],", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. 
# ----------------------------------------------------------------------------- def", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ),", "values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\",", "processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate.", "\"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)],", "( str(len(os.listdir(language_directory_name))) + \" files still found after processing\" ) # ------------------------------------------------------------------------- #", "initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # 
------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir(", "----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[", "------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) #", "source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params(", "test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) #", "+ file_ext), ) # 
------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes(", "fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir(", ") # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. # -----------------------------------------------------------------------------", "\"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core =", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory + \"' after processing missing\"", "pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test", "\"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates", ") dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes(", "test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory =", "language English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test", "processing missing\" ) 
assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name + \"'", "fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) #", "], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test", "[], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) #", "files still found after processing\" ) # ------------------------------------------------------------------------- # Check empty language subdirectory", "def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str =", "RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\",", "# ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> 
test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [", "inbox=( [], [], ), inbox_accepted=( [], [], ), inbox_rejected=( [], [ \"unknown_file_extension_1.xxx\", \"unknown_file_extension_protected_2.xxx\",", "file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=(", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901. #", "Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\"", "[], [], ), inbox_accepted=( [], [], ), inbox_rejected=( [], [ \"unknown_file_extension_1.xxx\", \"unknown_file_extension_protected_2.xxx\", ],", "pathlib import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run import pytest", "os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory + \"' after processing missing\" )", "source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # 
------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params(", "fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # -------------------------------------------------------------------------", "initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" #", "# ------------------------------------------------------------------------- # Test not language English in document # TBD # -------------------------------------------------------------------------", "------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1", "+ \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) # -------------------------------------------------------------------------", "processing\" ) # ------------------------------------------------------------------------- # Check empty language subdirectory # TBD # -------------------------------------------------------------------------", "stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=( [], [ stem_name_2 + \".\"", 
"----------------------------------------------------------------------------- # Constants & Globals. # ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue #", "import db.cls_run import pytest import utils import dcr # ----------------------------------------------------------------------------- # Constants &", "[\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), )", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt,", "French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path)", "----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str", "in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX -", "RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- 
fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\",", "), inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext, ], ), ) #", "(\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES,", "- duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "import pytest import utils import dcr # ----------------------------------------------------------------------------- # Constants & Globals. 
#", "+ \".\" + file_ext, ], ), inbox_accepted=( [], [ stem_name_2 + \".\" +", "\"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- #", "def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory", "[ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox)", "== len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found after processing\" ) #", "inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=( [], [", "<reponame>KonnexionsGmbH/dcr<gh_stars>1-10 # pylint: disable=unused-argument \"\"\"Testing Module pp.inbox.\"\"\" import os.path import pathlib import shutil", "values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "import dcr # ----------------------------------------------------------------------------- # Constants & Globals. 
# ----------------------------------------------------------------------------- # pylint: disable=W0212", ") os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" +", "language_directory_name + \"' after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name)))", "inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ),", "len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found after processing\" ) # -------------------------------------------------------------------------", "- accepted - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\"", "Test RUN_ACTION_PROCESS_INBOX - french. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [],", "test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"),", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ),", "), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX", "values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") 
pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=(", "- accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str =", "# ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test =", "+ file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes(", ") assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name + \"' after processing", "[], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), )", "duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir(", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=(", "str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox,", "RUN_ACTION_PROCESS_INBOX - ignore duplicates. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\"", "[ stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=( [], [ stem_name_2 +", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [], ), inbox_rejected=( [], [", "Constants & Globals. # ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- #", "\".\" + file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test", "not language English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- #", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1,", "[ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test", "[], [ stem_name_2 + \".\" + file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END)", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # 
----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901. # -----------------------------------------------------------------------------", "def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\",", "[], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\",", "= os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory,", "+ file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext:", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path)", "------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # 
------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected -", "# pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted -", "db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\"", "language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory +", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. 
# ----------------------------------------------------------------------------- def", "test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\"", "str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory + \"' after", "------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params(", "), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected -", "(\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES,", "[ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) #", "inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext, ], ), ) # -------------------------------------------------------------------------", "inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # 
-----------------------------------------------------------------------------", "], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french.", "directory '\" + base_directory + \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), (", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox):", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\",", "], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ],", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original =", "file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" 
pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename(", "+ \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate", "test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() #", "db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [],", "= \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1,", "target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\"", "utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language subdirectory", "# 
------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # -------------------------------------------------------------------------", "Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", "fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, )", "file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) #", "disable=unused-argument \"\"\"Testing Module pp.inbox.\"\"\" import os.path import pathlib import shutil import cfg.cls_setup import", "test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ),", "inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) #", "----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): 
\"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext,", "\".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\")", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ],", "# copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True)", "# Test RUN_ACTION_PROCESS_INBOX - french. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\"", "dcr # ----------------------------------------------------------------------------- # Constants & Globals. 
# ----------------------------------------------------------------------------- # pylint: disable=W0212 #", "values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\",", "), inbox_accepted=( [], [], ), inbox_rejected=( [], [ \"unknown_file_extension_1.xxx\", \"unknown_file_extension_protected_2.xxx\", ], ), )", "dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=(", "------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory", "values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "Test RUN_ACTION_PROCESS_INBOX - rejected. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\"", "missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name + \"' after", "pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory + \"' after processing", "+ \".\" + file_ext, ], ), inbox_rejected=( [], [ stem_name_2 + \".\" +", "stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ],", "copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database()", "901. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext,", "Test not language English in document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # -----------------------------------------------------------------------------", "import db.cls_db_core import db.cls_run import pytest import utils import dcr # ----------------------------------------------------------------------------- #", "\" files still found after processing\" ) # ------------------------------------------------------------------------- # Check empty language", "= \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\"", "initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language", "str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox ) stem_name_2:", "pp.inbox.\"\"\" import os.path import pathlib import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core", "= \"pdf_text_ok\" file_ext: str = \"pdf\" 
pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, )", "os.path import pathlib import shutil import cfg.cls_setup import cfg.glob import db.cls_db_core import db.cls_run", "], ), inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext, ], ), )", "str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory", "), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # -------------------------------------------------------------------------", "+ file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])", "), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) #", "source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2", "target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], )", "file_ext, ], ), inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext, ], ),", "+ \".\" + file_ext), ) # ------------------------------------------------------------------------- 
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate", "<=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END)", "(\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) #", "------------------------------------------------------------------------- # Check empty language subdirectory # TBD # ------------------------------------------------------------------------- # Test not", ") # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX -", "(\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES,", "initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file 
shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test),", "----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", "\"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"),", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" +", "- ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ],", "], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected", "utils import dcr # ----------------------------------------------------------------------------- # Constants & 
Globals. # ----------------------------------------------------------------------------- # pylint:", "= str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" + base_directory + \"'", "# ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])", "# pylint: disable=unused-argument \"\"\"Testing Module pp.inbox.\"\"\" import os.path import pathlib import shutil import", "import utils import dcr # ----------------------------------------------------------------------------- # Constants & Globals. # ----------------------------------------------------------------------------- #", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test", "+ file_ext, ], ), inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext, ],", "], ), inbox_rejected=( [], [ stem_name_2 + \".\" + file_ext, ], ), )", "after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name +", "], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\")", "RUN_ACTION_PROCESS_INBOX - rejected - duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected", "RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str", "+ \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) # -------------------------------------------------------------------------", "], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END)", "# Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params(", "os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(),", "fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original", "\"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name", "# Constants & Globals. 
# ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # -----------------------------------------------------------------------------", "& Globals. # ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test", "stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 +", "+ base_directory + \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory", "db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"],", "accepted - duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", "target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], )", "def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted)", "utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), )", "- rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str =", "TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. 
#", "# ------------------------------------------------------------------------- # Check empty language subdirectory # TBD # ------------------------------------------------------------------------- # Test", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[", "----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):", "db.cls_db_core import db.cls_run import pytest import utils import dcr # ----------------------------------------------------------------------------- # Constants", "\".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0,", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\",", "], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected.", "], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], 
target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename(", "cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - french. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test", "pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate.", "# ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test", "- rejected - 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected", "\"language directory '\" + language_directory_name + \"' after processing missing\" ) assert 0", "= db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) )", "[ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # -------------------------------------------------------------------------", "ignore duplicates. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\"", "str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str =", ") # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0,", "\"\"\"Testing Module pp.inbox.\"\"\" import os.path import pathlib import shutil import cfg.cls_setup import cfg.glob", "\"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir(", "cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) #", "], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\")", "file_ext)], 
target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 +", ") # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), (", "), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ],", "db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\"", "- French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name =", "# ----------------------------------------------------------------------------- # Constants & Globals. 
# ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue", "\"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"),", "+ file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [], ),", ") # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [],", "pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=( [],", "rejected - 901. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected -", "language subdirectory # TBD # ------------------------------------------------------------------------- # Test not language English in document", "= \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" +", "after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files", "pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) #", "------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original", "rejected. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"),", "assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name + \"' after processing missing\"", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\",", "------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox", ") # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [],", "utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, 
initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy", "def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir(", "document # TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore", "pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [], ), inbox_rejected=( [], [ \"unknown_file_extension_1.xxx\",", "0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found after processing\" )", "# ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" +", "ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox,", "initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = 
\"initial_database_data_french.json\" # copy test file", "[], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\",", "# TBD # ------------------------------------------------------------------------- # Test not language English in document # TBD", "Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", "os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext),", "cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox,", "stem_name_1 + \".\" + file_ext, ], ), inbox_rejected=( [], [ stem_name_2 + \".\"", "[], [ \"pdf_text_ok_1.pdf\", \"pdf_text_ok_protected_2.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- #", "= pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original,", "test_run_action_process_inbox_accepted_duplicate <=========\") 
pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ], ),", "[], [ stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=( [], [ stem_name_2", "[ stem_name_1 + \".\" + file_ext, ], ), inbox_accepted=( [], [ stem_name_2 +", "# ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)],", "- 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"),", "pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, )", "def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str =", ") cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"],", "dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # 
------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=(", "file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # -------------------------------------------------------------------------", "stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 +", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected,", "os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" + language_directory_name + \"' after processing missing\" )", "= \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" +", "pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [],", ") # 
------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [],", "pytest.helpers.verify_content_of_inboxes( inbox=( [], [], ), inbox_accepted=( [], [ \"pdf_text_ok_1.pdf\", ], ), inbox_rejected=( [],", "# ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted,", "fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\",", "assert 0 == len(os.listdir(language_directory_name)), ( str(len(os.listdir(language_directory_name))) + \" files still found after processing\"", "db.cls_run import pytest import utils import dcr # ----------------------------------------------------------------------------- # Constants & Globals.", "------------------------------------------------------------------------- stem_name_1: str = \"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext),", ") # ------------------------------------------------------------------------- 
cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected. # -----------------------------------------------------------------------------", "(cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========>", "pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original =", "), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)),", "= pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original,", "# ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])", "utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # 
-------------------------------------------------------------------------", "----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data)", "# Test RUN_ACTION_PROCESS_INBOX - rejected. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX -", "cfg.glob import db.cls_db_core import db.cls_run import pytest import utils import dcr # -----------------------------------------------------------------------------", "= \"initial_database_data_french.json\" # copy test file shutil.copy( utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test), utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name), ) cfg.glob.db_core", "RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name", "- ignore duplicates. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START)", "dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 +", "\"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory,", "inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext, ], ), ) # -------------------------------------------------------------------------", "target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted,", "= pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\" # copy", "= str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\" +", "file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # 
------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=(", "base_directory + \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\"", "------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params(", "RUN_ACTION_PROCESS_INBOX - rejected duplicate.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- stem_name_1: str = \"pdf_wrong_format\" file_ext: str", "test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected - 901.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected)", "# TBD # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.", "), inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext, ], ), ) #", "], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ],", "\"false\"), ], ) 
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected", "\"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext),", "duplicates. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", "cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox)", "# ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- initial_database_data_path =", "base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert os.path.isdir(utils.get_os_independent_name(base_directory)), ( \"base directory '\"", "\"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], 
target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + \".\" + file_ext),", "\"pdf_text_ok\" file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2:", "------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params(", "language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [", "\".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0,", "# Test RUN_ACTION_PROCESS_INBOX - rejected - 901. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox): \"\"\"Test", "# ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate. 
#", "------------------------------------------------------------------------- initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data) initial_database_data_path_directory = os.path.dirname(initial_database_data_path) initial_database_data_path_file_name = os.path.basename(initial_database_data_path) initial_database_data_path_file_name_test = \"initial_database_data_french.json\"", "RUN_ACTION_PROCESS_INBOX - accepted - duplicate. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - accepted", "\"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params(", "inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ),", "------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox,", "@pytest.mark.issue # ----------------------------------------------------------------------------- # Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. 
# ----------------------------------------------------------------------------- def", "+ \".\" + file_ext, ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # ----------------------------------------------------------------------------- #", ") # ------------------------------------------------------------------------- # Check empty language subdirectory # TBD # ------------------------------------------------------------------------- #", "empty language subdirectory # TBD # ------------------------------------------------------------------------- # Test not language English in", "(\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "# Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate. 
# ----------------------------------------------------------------------------- def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX", "found after processing\" ) # ------------------------------------------------------------------------- # Check empty language subdirectory # TBD", "TBD # ------------------------------------------------------------------------- # Test not language English in document # TBD #", "\"\"\"Test RUN_ACTION_PROCESS_INBOX - rejected.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"),", "# ------------------------------------------------------------------------- fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted) fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected) pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"unknown_file_extension\", \"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, )", "cfg.glob.logger.info(\"=========> test_run_action_process_inbox_accepted_duplicate <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [], [ stem_name_1 + \".\" + file_ext, ],", "+ \"' after processing missing\" ) assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), ( \"language directory '\" +", "pylint: disable=unused-argument \"\"\"Testing Module pp.inbox.\"\"\" import os.path import pathlib import shutil import cfg.cls_setup", "file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + \".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) #", "stem_name_2 + \".\" + file_ext, ], 
), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # -----------------------------------------------------------------------------", "\"pdf\"), (\"pdf_wrong_format\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [", "], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path(\"french\"))) assert", "RUN_ACTION_PROCESS_INBOX - french. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) #", "file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\" + file_ext), utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 +", "------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) # ------------------------------------------------------------------------- values_original =", "------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\") pytest.helpers.verify_content_of_inboxes( inbox=( [\"french\"], [], ), inbox_accepted=( [], [ \"docx_french_ok_1.docx\",", "(stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str = \"pdf_text_ok_1\" 
pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted", "], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_french <=========\")", "\"xxx\"), (\"unknown_file_extension_protected\", \"xxx\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [", "RUN_ACTION_PROCESS_INBOX - ignore duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"),", "pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, )", "\".\" + file_ext, ], ), inbox_accepted=( [], [ stem_name_2 + \".\" + file_ext,", "(\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, ) # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,", "[], [ \"pdf_text_ok_protected_2.pdf\", \"pdf_wrong_format_3.pdf\", ], ), ) # ------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END) # 
------------------------------------------------------------------------- cfg.glob.logger.debug(cfg.glob.LOGGER_END)", "\".\" + file_ext), ) # ------------------------------------------------------------------------- dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_rejected_duplicate <=========\")", "still found after processing\" ) # ------------------------------------------------------------------------- # Check empty language subdirectory #", "'\" + language_directory_name + \"' after processing missing\" ) assert 0 == len(os.listdir(language_directory_name)),", "\"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory = str(cfg.glob.setup.directory_inbox) language_directory_name =", "file_ext: str = \"pdf\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (stem_name_1, file_ext), ], target_path=cfg.glob.setup.directory_inbox, ) stem_name_2: str", "db.cls_db_core.DBCore(is_admin=True) cfg.glob.db_core.create_database() # ------------------------------------------------------------------------- # Copy language subdirectory pytest.helpers.copy_directories_4_pytest_2_dir( source_directories=[\"french\"], target_dir=str(cfg.glob.setup.directory_inbox) ) #", "[], [ \"docx_french_ok_1.docx\", \"pdf_french_ok_2.jpg\", \"pdf_french_ok_3.pdf\", \"pdf_french_scanned_4.pdf\", ], ), ) # ------------------------------------------------------------------------- base_directory =", "# Check empty language subdirectory # TBD # ------------------------------------------------------------------------- # Test not language", "str = \"pdf_text_ok_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted ) os.rename( 
utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + \".\"", "- french. # ----------------------------------------------------------------------------- def test_run_action_process_inbox_french(fxtr_setup_empty_inbox): \"\"\"Test RUN_ACTION_PROCESS_INBOX - French.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # -------------------------------------------------------------------------", ") # ------------------------------------------------------------------------- values_original = pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_VERBOSE, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0,", "duplicates.\"\"\" cfg.glob.logger.debug(cfg.glob.LOGGER_START) # ------------------------------------------------------------------------- pytest.helpers.copy_files_4_pytest_2_dir( source_files=[ (\"pdf_text_ok\", \"pdf\"), (\"pdf_text_ok_protected\", \"pdf\"), ], target_path=cfg.glob.setup.directory_inbox, )", "pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # ------------------------------------------------------------------------- cfg.glob.logger.info(\"=========> test_run_action_process_inbox_ignore_duplicates <=========\") pytest.helpers.verify_content_of_inboxes( inbox_accepted=( [], [", "= pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, [ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"false\"), ], ) dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original,", ") stem_name_2: str = \"pdf_wrong_format_1\" pytest.helpers.copy_files_4_pytest_2_dir( source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected ) os.rename( utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1", "[ (cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, \"true\"), ], ) 
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX]) pytest.helpers.restore_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, values_original, ) # -------------------------------------------------------------------------" ]
[ "smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc", "from nose.tools import eq_ from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import", "from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload", "from __future__ import absolute_import, division, print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto", "test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload, oc)", "import absolute_import, division, print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto import OrderCreate,", "eq_ from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message():", "print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from", "import eq_ from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def", "payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload, oc) eq_(payload.type,", "from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE", "smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload =", "nose.tools import eq_ from 
smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message", "set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456)", "PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type !=", "division, print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE", "OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload() assert", "= Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload, oc) eq_(payload.type, PAYLOAD_ORDER_CREATE)", "__future__ import absolute_import, division, print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto import", "<reponame>smarkets/smk_python_sdk from __future__ import absolute_import, division, print_function, unicode_literals from nose.tools import eq_ from", "import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload()", "def test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload,", "Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload, oc) eq_(payload.type, PAYLOAD_ORDER_CREATE) eq_(payload.order_create,", "Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils import set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type", "unicode_literals from nose.tools import eq_ from 
smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE from smarkets.streaming_api.utils", "absolute_import, division, print_function, unicode_literals from nose.tools import eq_ from smarkets.streaming_api.seto import OrderCreate, Payload,", "assert payload.type != PAYLOAD_ORDER_CREATE oc = OrderCreate(quantity=123456) set_payload_message(payload, oc) eq_(payload.type, PAYLOAD_ORDER_CREATE) eq_(payload.order_create, oc)", "import set_payload_message def test_set_payload_message(): payload = Payload() assert payload.type != PAYLOAD_ORDER_CREATE oc =" ]
[ "callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return:", "IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in columns: if col in list(income_sets.keys()):", "factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators)", "pdb, importlib, inspect, time, datetime, json # from PyFin.api import advanceDateByCalendar # from", "engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益", "name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods", "factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators", "# 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if col", "factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport from", "# cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data):", "= pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets,", "= 
datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array,", "columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP,", "axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund',", "factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators", "当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)", "factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None],", "valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code')", "on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets =", "# 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益", "营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 
}) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date,", "'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额", "income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO':", "#存储数据 # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))", "indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS,", "factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators)", "total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): # mkt_df =", "= pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators()", "import pdb, importlib, inspect, time, datetime, json # from PyFin.api import advanceDateByCalendar #", "factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "# def 
distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) #", "on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets =", "# 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column =", "trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s' % (time.time()", "factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] =", "读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] #", "# distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date)", "engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if col in", "# 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM", "list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue',", "= url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date:", "if col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', #", 
"CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data)", "def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式", "Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in", "total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def", "import timedelta, datetime from financial import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport", "], dates=[trade_date]) for col in columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col,", "def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods", "= engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, #", "col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量", "= per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "# Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col", "col in columns: if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, 
axis=1) indicator_sets =", "= time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) <", "trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets =", "if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex()", "income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT':", "@app.task # def distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) #", "每股股利(税前) ], dates=[trade_date]) for col in columns: if col in list(indicator_sets.keys()): indicator_sets =", "from PyFin.api import advanceDateByCalendar # from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine", "'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, #", "method): # 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method))) def", "* from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from", "axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, #", "valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = 
pd.merge(cash_flow_ttm_sets,", "# def distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) # content", "self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators',", "kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8')))", "date_time - 1 # print('trade_date pre %s year %s' % (n, date_time)) return", "None) # from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name, url, methods=[{'packet':", ").filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1)", "self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session,", "factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "print('data load time %s' % (time.time() - tic)) storage_engine = StorageEngine(self._url) result =", "'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): # mkt_df", "valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def", "name self._methods = methods self._url = url def get_trade_date(self, trade_date, n, days=365): 
\"\"\"", "% (n, date_time)) return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x:", "from financial import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import", "= ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,", "+ datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def", "@app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session", "return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and", "list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps',", "def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session = kwargs['session']", "from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data", "import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import * from data.sqlengine", ":param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date =", "sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets", "%s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # 
storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) #", "'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) })", "inspect, time, datetime, json # from PyFin.api import advanceDateByCalendar # from data.polymerize import", "time %s' % (time.time() - tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets)", "'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps',", "str(date_time) < min(trade_date_sets): # print('date_time %s is out of trade_date_sets' % date_time) return", "cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if", "sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data class", "# 稀释每股收益 ], dates=[trade_date]) for col in columns: if col in list(income_sets.keys()): income_sets", "list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets", "= time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s' % (time.time() - tic))", "'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization,", ":param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002',", "factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, 
factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date']", "factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators", "'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 })", "# # def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)", "trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name)", "# 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', #", "盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF,", "# 营业总收入 ], dates=[trade_date]) for col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets", "result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): # total_data = self.loading_data(trade_date)", "income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', #", "= name self._methods = methods self._url = url def get_trade_date(self, trade_date, n, days=365):", "x: not x.startswith('_') and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据", "= pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() 
valuation_sets = pd.merge(valuation_data, valuation_sets,", "importlib, inspect, time, datetime, json # from PyFin.api import advanceDateByCalendar # from data.polymerize", "on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame()", "= self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets, name): # calc_engines =", "loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array", "# 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if col", "self._methods = methods self._url = url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。", "trade_date: 当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101',", "# 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in", "if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm',", "if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', #", "= per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "\"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = 
trade_date_sets['TRADEDATE'].values time_array", "indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', #", "factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators", "vision.table.valuation import Valuation from vision.db.signletion_engine import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns',", "= factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators =", "factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators", "# 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date])", "资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,", "cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额", "for col in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets", "# 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, 
BalanceReport.RESE, BalanceReport.UNDIPROF, ],", "# 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets =", "import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) #", "column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column:", "balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col", "# 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC,", "numpy as np from datetime import timedelta, datetime from financial import factor_per_share_indicators from", "trade_date, result) # def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据 #", "from data.model import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model import IncomeReport,", "process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets", "per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index()", "trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE',", 
"per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets,", "return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\"", "n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s is", "result) # def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据 # session", "* 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) #", "BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from", "'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods self._url = url", "self._name) # # def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result =", "'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets", "time_array = time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time)", "print('trade_date pre %s year %s' % (n, date_time)) return str(date_time) def _func_sets(self, method):", "trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array", "= valuation_sets.set_index('security_code') factor_share_indicators = 
factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators)", "'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额", "{}\".format(kwargs)) # date_index = kwargs['date_index'] # session = kwargs['session'] # content = cache_data.get_cache(session", "(time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self,", "(time.time() - tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' %", "time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s' % (time.time() - tic)) storage_engine", "valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets,", "dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" #", "Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in list(valuation_data.keys()): valuation_data =", "= engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in", "col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益", "in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) 
balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP':", "dates=[trade_date]) for col in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1)", "trade_date): # total_data = self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() * 1000000", "['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA,", "result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets, name): # calc_engines", "# date_index = kwargs['date_index'] # session = kwargs['session'] # content = cache_data.get_cache(session +", "= SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date),", "= per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "= per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "# 每股股利(税前) ], dates=[trade_date]) for col in columns: if col in list(indicator_sets.keys()): indicator_sets", "trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): # total_data =", "x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\"", 
"'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE,", "cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() #", "营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in columns: if col in", "# 经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets", "factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators)", "in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT':", "__init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods =", "# 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date])", "= per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date)", "valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets,", "axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = 
pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets =", "in columns: if col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO':", "'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量", "factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code')", "valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in", "distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) #", "total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs): #", "# total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs):", "data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import * from", "factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators", "# calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data =", 
"ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'},", "[IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ],", "on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets =", "as pd import numpy as np from datetime import timedelta, datetime from financial", "valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets,", "valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets,", "valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "# 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量", "= income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润", "valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, 
on='security_code').reindex() valuation_sets = pd.merge(valuation_data,", "columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities',", "time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) * n date_time =", "def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param", "factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators)", "pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex()", "- 1 # print('trade_date pre %s year %s' % (n, date_time)) return str(date_time)", "from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class':", "factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators)", "IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import", "IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in columns: if col in list(income_ttm_sets.keys()):", "indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = 
indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', #", "'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入", "# 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date])", "days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util", "date_time) return str(date_time) else: while str(date_time) not in trade_date_sets: date_time = date_time -", "list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund',", "factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators)", "factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s' % (time.time() -", "# session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) #", "IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col", "str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and 
callable(getattr(method,", "from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import time import pandas as", "}) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for", "'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF,", "营业总收入 ], dates=[trade_date]) for col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets =", "SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\")", "# from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import time import pandas", "# pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name,", "营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date']", "# -*- coding: utf-8 -*- import pdb, importlib, inspect, time, datetime, json #", "factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators)", "return str(date_time) else: while str(date_time) not in trade_date_sets: date_time = date_time - 1", "归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if col in", "factor_share_indicators) factor_share_indicators = 
per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators)", "现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if col in", "= factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def", "= per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code']", "# total_data = self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() * 1000000 +", "n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets =", "np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date)", "= cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) # print(\"len_total_per_share_data {}\".format(len(total_pre_share_data)))", "= trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) * n", "import DBPolymerize from data.storage_engine import StorageEngine import time import pandas as pd import", "valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, 
income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets", "str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s'", "= syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array =", "datetime import timedelta, datetime from financial import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM,", "import IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine", "按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date", "columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm',", "date_index = kwargs['date_index'] # session = kwargs['session'] # content = cache_data.get_cache(session + str(date_index),", "trade_date_sets' % date_time) return str(date_time) else: while str(date_time) not in trade_date_sets: date_time =", "IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import * from data.sqlengine import sqlEngine", "'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets", "balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', #", "json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result", "as np from 
datetime import timedelta, datetime from financial import factor_per_share_indicators from data.model", "list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets,", "cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) # print(\"len_total_per_share_data {}\".format(len(total_pre_share_data))) #", "local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data", "valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in columns: if", "IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in columns: if col in list(indicator_sets.keys()):", "indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC':", "[CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_sets.keys()):", "balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF':", "income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', #", "financial import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM,", "col in columns: if col 
in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets =", "datetime, json # from PyFin.api import advanceDateByCalendar # from data.polymerize import DBPolymerize from", "# content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # #", "'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date])", "data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import", "for col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets", "factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators)", "归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入", "= int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s is out of", "per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets,", "if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', #", "import BalanceMRQ, BalanceTTM, BalanceReport from data.model import 
CashFlowTTM, CashFlowReport from data.model import IndicatorReport", "# 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets", "distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task #", "营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in columns:", "import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data", "for col in column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets", "time import pandas as pd import numpy as np from datetime import timedelta,", "StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date,", "# def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据 # session =", "}) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in", "= per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "# 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in", "'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, #", "pd.merge(cash_flow_sets, income_sets, 
on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex()", "in trade_date_sets: date_time = date_time - 1 # print('trade_date pre %s year %s'", "], dates=[trade_date]) for col in columns: if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col,", "cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets", "= sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data", "if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', #", "factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators", "datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol',", "np from datetime import timedelta, datetime from financial import factor_per_share_indicators from data.model import", "# 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in columns: if col", "[BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if", "factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) 
factor_share_indicators.replace([-np.inf, np.inf,", "print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result)", "valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share =", "vision.db.signletion_engine import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None)", "期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, #", "'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for", "= indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量", "企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data", "= income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入", "]): self._name = name self._methods = methods self._url = url def get_trade_date(self, trade_date,", "self._url = url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param", "CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name", "= indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益", "name): # 
calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data", "str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name)", "BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model", "私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date):", ":param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets", "交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d')", "'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, #", "= per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets", "col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入", "per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = 
per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets,", "per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets,", "(n, date_time)) return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x: not", "for col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets", "# 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']", "= pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets,", "datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns =", "columns: if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end',", "valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets):", "per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets,", "income_ttm_sets = 
engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO,", "import numpy as np from datetime import timedelta, datetime from financial import factor_per_share_indicators", "归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', # 未分配利润", "data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import time import pandas as pd", "url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日", "pre %s year %s' % (n, date_time)) return str(date_time) def _func_sets(self, method): #", "list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm',", "# print('date_time %s is out of trade_date_sets' % date_time) return str(date_time) else: while", "+ str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) # print(\"len_total_per_share_data {}\".format(len(total_pre_share_data))) # calculate(date_index,", "'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, #", "out of trade_date_sets' % date_time) return str(date_time) else: while str(date_time) not in trade_date_sets:", "], dates=[trade_date]) for col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col,", "pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name, url,", "= engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 
IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, #", "}) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益", "], dates=[trade_date]) for col in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col,", "= CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) #", "datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self,", "%s' % trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s'", "}) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润", "factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session = kwargs['session'] #", "= datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE',", "% (time.time() - tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s'", "in columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH':", "= income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入", "获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil()", "in list(cash_flow_sets.keys()): 
cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 })", "in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO':", "import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM from", "factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators)", "StorageEngine import time import pandas as pd import numpy as np from datetime", "营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益", "def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)),", "factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' %", "self._name = name self._methods = methods self._url = url def get_trade_date(self, trade_date, n,", "per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets,", "per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) 
factor_share_indicators.replace([-np.inf,", "%s' % (time.time() - tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time", "= date_time - 1 # print('trade_date pre %s year %s' % (n, date_time))", "factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators", "期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets =", "'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm',", "of trade_date_sets' % date_time) return str(date_time) else: while str(date_time) not in trade_date_sets: date_time", "# 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in", "营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP,", "col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额", "min(trade_date_sets): # print('date_time %s is out of trade_date_sets' % date_time) return str(date_time) else:", "= engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, #", "engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前)", 
"per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True)", "CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation", "col in columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets =", "cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm',", "= per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan,", "columns: if col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue',", "营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in columns:", "pd.merge(balance_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex()", "= self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) #", "= cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,", "归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 
营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for", "factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators", "'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE,", "'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable',", "url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods self._url", "str(date_time) not in trade_date_sets: date_time = date_time - 1 # print('trade_date pre %s", "基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in columns: if col in", "\"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE',", "packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) #", "tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() -", "# calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) #", "'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, #", "%s' % (n, date_time)) return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda", "1 # print('trade_date pre %s year %s' % (n, 
date_time)) return str(date_time) def", "# #存储数据 # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax',", "# # # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index =", "营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in columns: if col in", "return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets", "= methods self._url = url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param", "in column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets,", "factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators", "PyFin.api import advanceDateByCalendar # from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import", "in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP':", "IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in columns: if", "cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入", "print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load time", "per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, 
factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets,", "基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI,", "col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润", "data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport from data.model import", "int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s is out of trade_date_sets'", "# 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets =", "factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators)", "# # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index']", "CashFlowReport from data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation import", "% date_time) return str(date_time) else: while str(date_time) not in trade_date_sets: date_time = date_time", "storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据", "in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename(", "cache_data class CalcEngine(object): def 
__init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name", "get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in list(valuation_data.keys()): valuation_data", "storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): # total_data", "# session = kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data", "balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', #", "in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF':", "session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session,", "factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators", "}) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额", "factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) 
factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators)", "per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets,", "x.startswith('_') and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date:", "营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,", "coding: utf-8 -*- import pdb, importlib, inspect, time, datetime, json # from PyFin.api", "col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets", "in columns: if col in list(cash_flow_sets.keys()): cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA':", "[IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ],", "\"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子", "factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() # def", "per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets,", "dates=[trade_date]) for col in columns: if col in list(indicator_sets.keys()): indicator_sets = 
indicator_sets.drop(col, axis=1)", "# print('trade_date pre %s year %s' % (n, date_time)) return str(date_time) def _func_sets(self,", "_func_sets(self, method): # 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))", "def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据 # session = str(int(time.time()", "cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, #", "- tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date):", "pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex()", "per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets,", "per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets,", "pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date,", "methods=[{'packet': 
'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods self._url =", "pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators", "= str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日:", "= self.loading_data(trade_date) print('data load time %s' % (time.time() - tic)) storage_engine = StorageEngine(self._url)", "data.model import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM", "# @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] #", "= pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self,", "col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets =", "timedelta, datetime from financial import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport from", "TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date])", "], dates=[trade_date]) for col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col,", "# 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT,", "# 转换时间格式 time_array = 
datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine", "from data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import *", "col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets =", "= str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods),", "Valuation from vision.db.signletion_engine import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) #", "col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', #", "- tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time()", "list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据", "# 每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR,", "= engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns: if col", "= per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "axis=1) income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 
'operating_profit',", "'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ],", "dates=[trade_date]) for col in columns: if col in list(income_ttm_sets.keys()): income_ttm_sets = income_ttm_sets.drop(col, axis=1)", "import pandas as pd import numpy as np from datetime import timedelta, datetime", "cash_flow_sets = cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets =", "str(date_time) else: while str(date_time) not in trade_date_sets: date_time = date_time - 1 #", "= per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "# TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ],", "= factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets,", "trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array,", "% trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load time %s' %", "tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): #", "not x.startswith('_') and callable(getattr(method, x)), dir(method))) def 
loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param", "valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets", "calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index", "BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if col in list(balance_sets.keys()): balance_sets =", "'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date'] valuation_data =", "1000000 + datetime.datetime.now().microsecond)) # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # #", "'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润", "def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date)", "cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records')) # distributed_factor.delay(session, json.dumps(self._methods), self._name) # # def distributed_factor(self, total_data): #", "转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine =", "valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets", "per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, 
factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets,", "-*- import pdb, importlib, inspect, time, datetime, json # from PyFin.api import advanceDateByCalendar", "trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) * n date_time", "storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic))", "valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] =", "'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date'] valuation_data", "cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col", "未分配利润 }) indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport, [IndicatorReport.FCFE, # 股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, #", "methods self._url = url def get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days:", "稀释每股收益 ], dates=[trade_date]) for col in columns: if col in list(income_sets.keys()): income_sets =", "for col in columns: if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets", "factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators", 
"['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col", "'class': 'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods self._url = url def", "= per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic = time.time()", "IncomeReport, IncomeTTM from vision.table.valuation import Valuation from vision.db.signletion_engine import * from data.sqlengine import", "data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for", "= valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex()", "session = kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data =", "from datetime import timedelta, datetime from financial import factor_per_share_indicators from data.model import BalanceMRQ,", "DBPolymerize from data.storage_engine import StorageEngine import time import pandas as pd import numpy", "from vision.db.signletion_engine import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows',", "json # from PyFin.api import advanceDateByCalendar # from data.polymerize import DBPolymerize from data.storage_engine", "None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import 
cache_data class CalcEngine(object): def __init__(self,", "import factor_per_share_indicators from data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport", "\"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s is out of trade_date_sets' %", "columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI':", "营业总收入 }) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col", "columns: if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps',", "col in column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets =", "# print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session = kwargs['session'] # content", "mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date,", "np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic =", "content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # #", "# 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code,", "factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) 
factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators)", "- timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): #", "kwargs['date_index'] # session = kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index) #", "factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", "# 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,", "advanceDateByCalendar # from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import time import", "# 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', #", "= cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO,", "factor_share_indicators) factor_share_indicators = factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return", "from vision.table.valuation import Valuation from vision.db.signletion_engine import * from data.sqlengine import sqlEngine #", ":return: \"\"\" syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values", "utf-8 -*- import pdb, importlib, inspect, time, datetime, json # from PyFin.api import", "# 经营活动现金流量净额 }) income_ttm_sets = 
engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO,", "axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps',", "'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 })", "= per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators =", "date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s is out", "= datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns", "result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)", "IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col", "# def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session =", "engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in columns:", "factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators)", "# 营业利润 
IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in columns: if col", "def distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) # content =", "days: :param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets =", "self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name,", "BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if col in list(balance_sets.keys()): balance_sets", "distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session,", "from data.model import BalanceMRQ, BalanceTTM, BalanceReport from data.model import CashFlowTTM, CashFlowReport from data.model", "syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array", "= kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content,", "else: while str(date_time) not in trade_date_sets: date_time = date_time - 1 # print('trade_date", "time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine()", "import StorageEngine import time import pandas as pd import numpy as np from", "None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic", "= json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs:", "datetime from financial import factor_per_share_indicators from data.model import BalanceMRQ, 
BalanceTTM, BalanceReport from data.model", "engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report", "dates=[trade_date]) for col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)", "'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets", "for col in columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets", "factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "factor_share_indicators.reset_index() factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self,", "str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) # print(\"len_total_per_share_data {}\".format(len(total_pre_share_data))) # calculate(date_index, total_pre_share_data)", "= per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "# 营业总收入 }) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for", "Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col,", "# from PyFin.api import advanceDateByCalendar # 
from data.polymerize import DBPolymerize from data.storage_engine import", "engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns:", "# pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object):", "content = cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) # print(\"len_total_per_share_data", "< min(trade_date_sets): # print('date_time %s is out of trade_date_sets' % date_time) return str(date_time)", "valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators =", "indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS':", "self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets, name):", "'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, #", "data.storage_engine import StorageEngine import time import pandas as pd import numpy as np", "= per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) 
factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets,", "income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS':", "trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array =", "balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE':", "datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\"))", "column: if col in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets,", "= pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators =", "factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators", "factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators)", "factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) 
factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators)", "稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益 BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date])", "factor_share_indicators) factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators)", "= cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets =", "import Valuation from vision.db.signletion_engine import * from data.sqlengine import sqlEngine # pd.set_option('display.max_columns', None)", ":return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\") trade_date = datetime.strftime(time_array, '%Y%m%d') #", "columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets =", "# from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators',", "营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for", "date_time)) return str(date_time) def _func_sets(self, method): # 
私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_')", "while str(date_time) not in trade_date_sets: date_time = date_time - 1 # print('trade_date pre", "[CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if", "valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return", "[IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ],", "engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入", "def distributed_factor(self, total_data): # mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task", "= pd.merge(indicator_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets,", "packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) #", "income_ttm_sets = income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', #", "% (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def", "IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS, # 稀释每股收益 ], dates=[trade_date]) for col in columns: if", "for col in columns: if col in 
list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets", "factor_share_indicators['trade_date'] = str(trade_date) factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True) return factor_share_indicators def local_run(self, trade_date):", "data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation from", "axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 })", "class CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]): self._name =", "= ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if", "= per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets,", "= cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', #", "'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit',", "= balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 
'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积", "'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date'] # Report data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, #", "\"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util =", "year %s' % (n, date_time)) return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤 return", "axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm',", "on='security_code').reindex() valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets", "print('date_time %s is out of trade_date_sets' % date_time) return str(date_time) else: while str(date_time)", "dates=[trade_date]) for col in columns: if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1)", "per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators,", "pandas as pd import numpy as np from datetime import timedelta, datetime from", "per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators", "= per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = 
per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "total_data = self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))", "BalanceReport from data.model import CashFlowTTM, CashFlowReport from data.model import IndicatorReport from data.model import", "经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, #", "timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time", "data cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport, [CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额 ], dates=[trade_date]) for col in columns:", "import advanceDateByCalendar # from data.polymerize import DBPolymerize from data.storage_engine import StorageEngine import time", "BalanceReport.CAPISURP, BalanceReport.RESE, BalanceReport.UNDIPROF, ], dates=[trade_date]) for col in columns: if col in list(balance_sets.keys()):", "cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,", "per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets,", "= per_share.OptProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.SurplusReservePS(valuation_sets, 
factor_share_indicators) factor_share_indicators =", "is out of trade_date_sets' % date_time) return str(date_time) else: while str(date_time) not in", "# 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积 'UNDIPROF': 'retained_profit', #", "= income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入 'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润", "# 股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', #", "= StorageEngine(self._url) result = self.process_calc_factor(trade_date, valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]),", "= self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond)) #", "syn_util = SyncUtil() trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array =", "income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO, # 营业收入 IncomeReport.BIZTOTINCO, # 营业总收入 IncomeReport.PERPROFIT, # 营业利润 IncomeReport.DILUTEDEPS,", "date_time = date_time - 1 # print('trade_date pre %s year %s' % (n,", "股东自由现金流量 'FCFF': 'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前)", "经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets =", "list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额 'MANANETR':", "= valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') 
factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators) factor_share_indicators", "# result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets, name): #", "trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n: :return:", "-*- coding: utf-8 -*- import pdb, importlib, inspect, time, datetime, json # from", "cash_flow_sets.drop(col, axis=1) cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额 }) income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport, [IncomeReport.BIZINCO,", "valuation_sets = self.loading_data(trade_date) print('data load time %s' % (time.time() - tic)) storage_engine =", "trade_date): print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets = self.loading_data(trade_date) print('data load", "'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 }) balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport, [BalanceReport.PARESHARRIGH,", "# storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result) # def remote_run(self, trade_date): # total_data = self.loading_data(trade_date) #", "'19900101', trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array -", "股东自由现金流量 IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for", "CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in columns: if col in list(cash_flow_ttm_sets.keys()):", "dates=[trade_date]) for col in columns: if col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1)", "factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) 
factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators", "calc_engines = CalcEngine(name, packet_sets) # content = cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content))", "factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators)", "'BIZTOTINCO': 'total_operating_revenue', # 营业总收入 'PERPROFIT': 'operating_profit', # 营业利润 'DILUTEDEPS': 'diluted_eps', # 稀释每股收益 })", "# 营业收入 IncomeTTM.BIZTOTINCO, # 营业总收入 ], dates=[trade_date]) for col in columns: if col", "col in columns: if col in list(income_sets.keys()): income_sets = income_sets.drop(col, axis=1) income_sets =", "获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date, \"%Y-%m-%d\")", "= engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, # 经营活动现金流量净额 ], dates=[trade_date]) for col in", "on='security_code').reindex() valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex() return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share", "remote_run(self, trade_date): # total_data = self.loading_data(trade_date) # #存储数据 # session = str(int(time.time() *", "= get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date]))) for col in column: if col in list(valuation_data.keys()):", "n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n: :return: \"\"\"", "\"%Y%m%d\") time_array 
= time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if", "trade_date) trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days)", "per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EPSTTM(valuation_sets,", "\"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日 :return: \"\"\" # 转换时间格式 time_array = datetime.strptime(trade_date,", "print(\"per_share_kwargs: {}\".format(kwargs)) # date_index = kwargs['date_index'] # session = kwargs['session'] # content =", "pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # from ultron.cluster.invoke.cache_data import cache_data class CalcEngine(object): def", "valuation_sets) print('cal_time %s' % (time.time() - tic)) storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result) # storage_engine.update_destdb('factor_pre_share_indicators', trade_date,", "'FactorPerShareIndicators'}, ]): self._name = name self._methods = methods self._url = url def get_trade_date(self,", "not in trade_date_sets: date_time = date_time - 1 # print('trade_date pre %s year", "factor_share_indicators) factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators)", "# mkt_df = self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session,", "# 私有函数和保护函数过滤 return list(filter(lambda x: not x.startswith('_') and 
callable(getattr(method, x)), dir(method))) def loading_data(self,", "'enterprise_fcfps', # 企业自由现金流量 'EPSBASIC': 'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) #", "in list(valuation_data.keys()): valuation_data = valuation_data.drop(col, axis=1) valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex() valuation_sets =", "load time %s' % (time.time() - tic)) storage_engine = StorageEngine(self._url) result = self.process_calc_factor(trade_date,", "trade_date_sets = trade_date_sets['TRADEDATE'].values time_array = datetime.strptime(str(trade_date), \"%Y%m%d\") time_array = time_array - timedelta(days=days) *", "return valuation_sets def process_calc_factor(self, trade_date, valuation_sets): per_share = factor_per_share_indicators.FactorPerShareIndicators() factor_share_indicators = pd.DataFrame() factor_share_indicators['security_code']", "* n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets): # print('date_time %s", "time_array - timedelta(days=days) * n date_time = int(datetime.strftime(time_array, \"%Y%m%d\")) if str(date_time) < min(trade_date_sets):", "in columns: if col in list(indicator_sets.keys()): indicator_sets = indicator_sets.drop(col, axis=1) indicator_sets = indicator_sets.rename(columns={'FCFE':", "'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', # 营业收入 'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入 }) column", "factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators) factor_share_indicators", "if col in list(balance_sets.keys()): balance_sets = balance_sets.drop(col, axis=1) balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', #", "IndicatorReport.FCFF, # 企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 
IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col", "factor_share_indicators def local_run(self, trade_date): print('当前交易日: %s' % trade_date) tic = time.time() valuation_sets =", "from data.storage_engine import StorageEngine import time import pandas as pd import numpy as", "}) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT, # 营业利润 IncomeTTM.BIZINCO, # 营业收入", "pd import numpy as np from datetime import timedelta, datetime from financial import", "from data.model import IndicatorReport from data.model import IncomeReport, IncomeTTM from vision.table.valuation import Valuation", "trade_date_sets: date_time = date_time - 1 # print('trade_date pre %s year %s' %", "in list(cash_flow_ttm_sets.keys()): cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1) cash_flow_ttm_sets = cash_flow_ttm_sets.rename( columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额", "= per_share.UndividedProfitPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators) factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators =", ":param days: :param trade_date: 当前交易日 :param n: :return: \"\"\" syn_util = SyncUtil() trade_date_sets", "'total_operating_revenue_ttm', # 营业总收入 }) column = ['trade_date'] valuation_data = get_fundamentals(query(Valuation.security_code, Valuation.trade_date, Valuation.capitalization, ).filter(Valuation.trade_date.in_([trade_date])))", "'%Y%m%d') # 读取目前涉及到的因子 engine = sqlEngine() columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id',", "每股股利(税前) }) # TTM data cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM, [CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额 CashFlowTTM.MANANETR, #", "import time import pandas as pd import numpy as np from datetime import", "# content = 
cache_data.get_cache(session + str(date_index), date_index) # total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8'))) #", "time, datetime, json # from PyFin.api import advanceDateByCalendar # from data.polymerize import DBPolymerize", "if str(date_time) < min(trade_date_sets): # print('date_time %s is out of trade_date_sets' % date_time)", "'basic_eps', # 基本每股收益 'DPS': 'dividend_receivable', # 每股股利(税前) }) # TTM data cash_flow_ttm_sets =", "get_trade_date(self, trade_date, n, days=365): \"\"\" 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。 :param days: :param trade_date: 当前交易日 :param n:", "income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO': 'operating_revenue_ttm', #", "= per_share.CFPSTTM(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators) factor_share_indicators =", "self.loading_data(trade_date) print('data load time %s' % (time.time() - tic)) storage_engine = StorageEngine(self._url) result", "and callable(getattr(method, x)), dir(method))) def loading_data(self, trade_date): \"\"\" 获取基础数据 按天获取当天交易日所有股票的基础数据 :param trade_date: 交易日", "%s is out of trade_date_sets' % date_time) return str(date_time) else: while str(date_time) not", "%s year %s' % (n, date_time)) return str(date_time) def _func_sets(self, method): # 私有函数和保护函数过滤", "= self.calc_factor_by_date(total_data,trade_date) # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date) # @app.task # def distributed_factor(session, trade_date, packet_sets,", "import cache_data class CalcEngine(object): def __init__(self, name, url, methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]):", "= balance_sets.rename(columns={'PARESHARRIGH': 
'total_owner_equities', # 归属于母公司的所有者权益 'CAPISURP': 'capital_reserve_fund', # 资本公积 'RESE': 'surplus_reserve_fund', # 盈余公积", "income_ttm_sets.drop(col, axis=1) income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润 'PERPROFIT': 'operating_profit_ttm', # 营业利润 'BIZINCO':", "pd.DataFrame() factor_share_indicators['security_code'] = valuation_sets['security_code'] valuation_sets = valuation_sets.set_index('security_code') factor_share_indicators = factor_share_indicators.set_index('security_code') factor_share_indicators = per_share.EPS(valuation_sets,", "= kwargs['date_index'] # session = kwargs['session'] # content = cache_data.get_cache(session + str(date_index), date_index)", "json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task() # def factor_calculate(**kwargs): # print(\"per_share_kwargs: {}\".format(kwargs))", "], dates=[trade_date]) for col in columns: if col in list(income_sets.keys()): income_sets = income_sets.drop(col,", "# @app.task # def distributed_factor(session, trade_date, packet_sets, name): # calc_engines = CalcEngine(name, packet_sets)", "# 现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润", "企业自由现金流量 IndicatorReport.EPSBASIC, # 基本每股收益 IndicatorReport.DPS, # 每股股利(税前) ], dates=[trade_date]) for col in columns:", "= cache_data.get_cache(session, factor_name) # total_data = json_normalize(json.loads(content)) # calc_engines.distributed_factor(total_data) # # # @app.task()", "现金及现金等价物净增加额 'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额 }) income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM, [IncomeTTM.PARENETP, # 归属于母公司所有者的净利润 IncomeTTM.PERPROFIT," ]
[ "home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes you up. You lay in", "def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = { 'scenario':", "yells at you to get up you yell at her \\\"You don't understand", "g, redirect, render_template, request, session, url_for ) from werkzeug.exceptions import abort bp =", "at her \\\"You don't understand me mom!\\\" She dropkicks you into the bus", "and yells at you to get up you yell at her \\\"You don't", "sceneboi = { 'scenario': \"\"\"Your alarm wakes you up. You lay in bed", "don't understand me mom!\\\" She dropkicks you into the bus from your room.", "yell at her \\\"You don't understand me mom!\\\" She dropkicks you into the", "bus from your room. You land in the driver's seat of the bus", "\"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus',", "dropkicks you into the bus from your room. You land in the driver's", "into the bus from your room. You land in the driver's seat of", "methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You lay in bed and", "def home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes you up. You lay", "\"\"\"You lay in bed and close your eyes. Your mom comes through your", "your eyes. Your mom comes through your air vent and yells at you", "alarm wakes you up. You lay in bed and decide whether or not", "\"\"\"Your alarm wakes you up. You lay in bed and decide whether or", "the bus. do you? 1. Drift dat boi 2. Drive like a civilized", "return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You", "\\n 1. Stay in and sleep \\n 2. Get ready for class\"\"\" }", "and close your eyes. 
Your mom comes through your air vent and yells", "__name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi =", "1. Drift dat boi 2. Drive like a civilized person\"\"\" } if request.method", "== \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi", "<gh_stars>0 from flask import ( Blueprint, flash, g, redirect, render_template, request, session, url_for", "you into the bus from your room. You land in the driver's seat", "2. Get ready for class\"\"\" } if request.method == 'POST': choice = request.form['action']", "if choice == \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return", "through your air vent and yells at you to get up you yell", "abort bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST'))", "get up you yell at her \\\"You don't understand me mom!\\\" She dropkicks", "redirect, render_template, request, session, url_for ) from werkzeug.exceptions import abort bp = Blueprint('scenes',", "methods=('GET', 'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes you up.", "flask import ( Blueprint, flash, g, redirect, render_template, request, session, url_for ) from", "in bed and decide whether or not to skip class. \\n 1. Stay", "'scenario': \"\"\"Your alarm wakes you up. You lay in bed and decide whether", "index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your", "from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html')", "= { 'scenario': \"\"\"Your alarm wakes you up. You lay in bed and", "wakes you up. 
You lay in bed and decide whether or not to", "civilized person\"\"\" } if request.method == 'POST': choice = request.form['action'] return render_template('scenes/play.html', scene=sceneboi)", "return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET',", "1. Stay in and sleep \\n 2. Get ready for class\"\"\" } if", "request, session, url_for ) from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/')", "Drive like a civilized person\"\"\" } if request.method == 'POST': choice = request.form['action']", "'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You lay in bed and close", "= { 'scenario': \"\"\"You lay in bed and close your eyes. Your mom", "Get ready for class\"\"\" } if request.method == 'POST': choice = request.form['action'] if", "choice = request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\":", "mom!\\\" She dropkicks you into the bus from your room. You land in", "the bus from your room. You land in the driver's seat of the", "} if request.method == 'POST': choice = request.form['action'] if choice == \"1\": return", "have to drive the bus. do you? 1. Drift dat boi 2. Drive", "bus(): sceneboi = { 'scenario': \"\"\"You lay in bed and close your eyes.", "request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk'))", "in bed and close your eyes. Your mom comes through your air vent", "( Blueprint, flash, g, redirect, render_template, request, session, url_for ) from werkzeug.exceptions import", "{ 'scenario': \"\"\"Your alarm wakes you up. 
You lay in bed and decide", "scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You lay in", "redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST'))", "{ 'scenario': \"\"\"You lay in bed and close your eyes. Your mom comes", "request.method == 'POST': choice = request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if", "url_for ) from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/') def index():", "and decide whether or not to skip class. \\n 1. Stay in and", "Stay in and sleep \\n 2. Get ready for class\"\"\" } if request.method", "and sleep \\n 2. Get ready for class\"\"\" } if request.method == 'POST':", "choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus():", "werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home',", "seat of the bus and realize you have to drive the bus. do", "bed and decide whether or not to skip class. \\n 1. Stay in", "do you? 1. Drift dat boi 2. Drive like a civilized person\"\"\" }", "\"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi =", "render_template, request, session, url_for ) from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__)", "whether or not to skip class. \\n 1. Stay in and sleep \\n", "'scenario': \"\"\"You lay in bed and close your eyes. 
Your mom comes through", "mom comes through your air vent and yells at you to get up", "like a civilized person\"\"\" } if request.method == 'POST': choice = request.form['action'] return", "sceneboi = { 'scenario': \"\"\"You lay in bed and close your eyes. Your", "land in the driver's seat of the bus and realize you have to", "at you to get up you yell at her \\\"You don't understand me", "for class\"\"\" } if request.method == 'POST': choice = request.form['action'] if choice ==", "bed and close your eyes. Your mom comes through your air vent and", "comes through your air vent and yells at you to get up you", "up. You lay in bed and decide whether or not to skip class.", "bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def", "return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = {", "the bus and realize you have to drive the bus. do you? 1.", "\\\"You don't understand me mom!\\\" She dropkicks you into the bus from your", ") from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return", "@bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = {", "render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You lay", "lay in bed and close your eyes. Your mom comes through your air", "you? 1. Drift dat boi 2. 
Drive like a civilized person\"\"\" } if", "render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes", "choice == \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html',", "up you yell at her \\\"You don't understand me mom!\\\" She dropkicks you", "dat boi 2. Drive like a civilized person\"\"\" } if request.method == 'POST':", "== 'POST': choice = request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if choice", "'POST': choice = request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if choice ==", "'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes you up. You", "Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi", "bus and realize you have to drive the bus. do you? 1. Drift", "boi 2. Drive like a civilized person\"\"\" } if request.method == 'POST': choice", "air vent and yells at you to get up you yell at her", "decide whether or not to skip class. \\n 1. Stay in and sleep", "of the bus and realize you have to drive the bus. do you?", "= request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return", "if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def", "me mom!\\\" She dropkicks you into the bus from your room. You land", "you yell at her \\\"You don't understand me mom!\\\" She dropkicks you into", "eyes. 
Your mom comes through your air vent and yells at you to", "her \\\"You don't understand me mom!\\\" She dropkicks you into the bus from", "== \"1\": return redirect(url_for('scenes.bus')) if choice == \"2\": return redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi)", "import ( Blueprint, flash, g, redirect, render_template, request, session, url_for ) from werkzeug.exceptions", "you up. You lay in bed and decide whether or not to skip", "to drive the bus. do you? 1. Drift dat boi 2. Drive like", "import abort bp = Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET',", "or not to skip class. \\n 1. Stay in and sleep \\n 2.", "you have to drive the bus. do you? 1. Drift dat boi 2.", "You land in the driver's seat of the bus and realize you have", "@bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario': \"\"\"You lay in bed", "in and sleep \\n 2. Get ready for class\"\"\" } if request.method ==", "Your mom comes through your air vent and yells at you to get", "not to skip class. \\n 1. Stay in and sleep \\n 2. Get", "to get up you yell at her \\\"You don't understand me mom!\\\" She", "lay in bed and decide whether or not to skip class. \\n 1.", "Drift dat boi 2. Drive like a civilized person\"\"\" } if request.method ==", "understand me mom!\\\" She dropkicks you into the bus from your room. You", "bus. do you? 1. Drift dat boi 2. Drive like a civilized person\"\"\"", "class\"\"\" } if request.method == 'POST': choice = request.form['action'] if choice == \"1\":", "your air vent and yells at you to get up you yell at", "you to get up you yell at her \\\"You don't understand me mom!\\\"", "to skip class. \\n 1. Stay in and sleep \\n 2. Get ready", "@bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your alarm wakes you", "She dropkicks you into the bus from your room. 
You land in the", "vent and yells at you to get up you yell at her \\\"You", "def bus(): sceneboi = { 'scenario': \"\"\"You lay in bed and close your", "close your eyes. Your mom comes through your air vent and yells at", "return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home(): sceneboi = { 'scenario': \"\"\"Your alarm", "2. Drive like a civilized person\"\"\" } if request.method == 'POST': choice =", "You lay in bed and decide whether or not to skip class. \\n", "driver's seat of the bus and realize you have to drive the bus.", "skip class. \\n 1. Stay in and sleep \\n 2. Get ready for", "session, url_for ) from werkzeug.exceptions import abort bp = Blueprint('scenes', __name__) @bp.route('/') def", "if request.method == 'POST': choice = request.form['action'] if choice == \"1\": return redirect(url_for('scenes.bus'))", "room. You land in the driver's seat of the bus and realize you", "class. \\n 1. Stay in and sleep \\n 2. Get ready for class\"\"\"", "sleep \\n 2. Get ready for class\"\"\" } if request.method == 'POST': choice", "Blueprint, flash, g, redirect, render_template, request, session, url_for ) from werkzeug.exceptions import abort", "from your room. You land in the driver's seat of the bus and", "flash, g, redirect, render_template, request, session, url_for ) from werkzeug.exceptions import abort bp", "realize you have to drive the bus. do you? 1. Drift dat boi", "drive the bus. do you? 1. Drift dat boi 2. Drive like a", "ready for class\"\"\" } if request.method == 'POST': choice = request.form['action'] if choice", "= Blueprint('scenes', __name__) @bp.route('/') def index(): return render_template('scenes/index.html') @bp.route('/home', methods=('GET', 'POST')) def home():", "redirect(url_for('scenes.walk')) return render_template('scenes/play.html', scene=sceneboi) @bp.route('/bus', methods=('GET', 'POST')) def bus(): sceneboi = { 'scenario':", "and realize you have to drive the bus. do you? 1. 
Drift dat", "your room. You land in the driver's seat of the bus and realize", "a civilized person\"\"\" } if request.method == 'POST': choice = request.form['action'] return render_template('scenes/play.html',", "the driver's seat of the bus and realize you have to drive the", "from flask import ( Blueprint, flash, g, redirect, render_template, request, session, url_for )", "in the driver's seat of the bus and realize you have to drive", "\\n 2. Get ready for class\"\"\" } if request.method == 'POST': choice =" ]
[ "patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'],", "'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2)", "as file: for i in range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id'", "data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics':", "in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable", "transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'],", "'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount':", "as file: file.write(dataList) file.close() if dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json',", "patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson':", ": [ 
{ 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'],", "dbtable = self.dbtable if dbtable not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not", "'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail':", "'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax':", "] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable", "'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM", "patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'],", "'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity':", "with 
open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(userList)) : data2 =json.dumps(", "patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'],", "}], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber':", "i in range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'],", "'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT':", "testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2", "transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' :", "=json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'],", "patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': 
patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'],", "'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [", "'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2)", "'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm':", "'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList elif dbtable", "{ 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'],", "in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList =", "'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber':", "testList = json.loads(dataList) with 
open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(testList))", "not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData", "len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName':", "'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice':", "], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'],", "file.write(data2) file.close() return data2, dataList elif dbtable == 'transactions' : transactionList = json.loads(dataList)", ":[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'],", "userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'],", "'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country':", "json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList)", "patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': 
patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'],", ": [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime':", "== 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i", "'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails'", "userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] },", "json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname':", "with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(transactionList)) : data2 =", "transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'],", "transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ {", "testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 
'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice']", "= json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(testList)) :", "'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename':", "patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'],", "transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'],", "'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier':", "= json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(transactionList)) :", "'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2)", "as file: for i in range(0, len(testList)) : data2 = json.dumps( { 'test_id':", "userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': 
userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count':", "'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip':", "} ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto':", "inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for", "transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][", "{ 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'],", "transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'],", "json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ {", "with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable == 'patients': patientList =", "userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close() 
# End for", "transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'],", "'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType':", "userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location':", "'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code']", "file: for i in range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id' :", "'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference'", "patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'],", "datetime from .dbconnect import engine, alchemyencoder #from .dbconnect import engine, alchemyencoder pwd =", "engine, alchemyencoder #from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def", "'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': 
patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'],", "patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'],", "'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber':", "'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] }", "with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(patientList)) : data2 =", "'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return", "file: for i in range(0, len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'],", "], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'],", ":[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'],", "patientList[i]['patientAddress'], 
'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[", "'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in", "transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 )", "patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList", "sqlalchemy import create_engine, inspect import os, json #import requests import decimal, datetime from", "for i in range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1, 'transactTime':", "os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable not in ['patients', 'labtests', 'transactions',", "__init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self):", "return data2, dataList elif dbtable == 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json',", "import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable, *args): super(flatToCascadedJson,", "'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': 
patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState':", "'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber':", "'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'],", "patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'],", "'w+') as file: for i in range(0, len(testList)) : data2 = json.dumps( {", "transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' :", "{ 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] }", "userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2)", 
"transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ],", "import decimal, datetime from .dbconnect import engine, alchemyencoder #from .dbconnect import engine, alchemyencoder", "json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails':", "'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close() # End", "self.dbtable if dbtable not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in", "dataList elif dbtable == 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as", "'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount':", "'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2", "} ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif", "row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if", "} ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif", "= os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable, 
*args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if", "'w+') as file: for i in range(0, len(transactionList)) : data2 = json.dumps( {", "open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(testList)) : data2 = json.dumps(", "userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active']", "decimal, datetime from .dbconnect import engine, alchemyencoder #from .dbconnect import engine, alchemyencoder pwd", "not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable not in ['patients',", "patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ],", "transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ {", "] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable", "'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] },", "=dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable not", "== 'user' : userList = json.loads(dataList) with 
open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i", ": userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'],", "json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(testList)) : data2", "patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'],", "'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] }", "userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close() #", ": userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{", "userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(userList))", "patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': 
patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone':", "data2, dataList elif dbtable == 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+')", "patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'],", "'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname':", ") #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable == 'labtests' :", "i in range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID':", "range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password':", "transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ {", "patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail':", "== 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in", "}], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': 
userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'],", "{ 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' :", "#print(transactionList[0]) return data2, dataList elif dbtable == 'user' : userList = json.loads(dataList) with", "'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics'", "file.write(dataList) file.close() if dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as", "'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'],", "}] }, indent=2 ) #print(data2) file.write(data2) file.close() # End for statement' return data2,", "'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState':", ": userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country']", "'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location'", "'country': userList[i]['country'] }], 'zip_code' : 
userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'],", "{ 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'],", "#print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable == 'user' : userList", "userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 )", "if dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for", ".format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json',", "'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active':", "{'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose()", "transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'],", "{ 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 
'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'],", "testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'],", ": [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'],", "dataList elif dbtable == 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as", "transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'],", ") #print(data2) file.write(data2) file.close() return data2, dataList elif dbtable == 'transactions' : transactionList", "'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email':", "'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod':", "default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable == 'patients':", "'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state':", "pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, 
dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable", "{ 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'],", "#print(patientList) return data2, dataList elif dbtable == 'labtests' : testList = json.loads(dataList) with", "alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable", "dbtable not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine)", "'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2,", "'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby':", "data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails'", "json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(userList)) : data2", ": data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'],", "[ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 
'testspriority': transactionList[i]['testspriority'], 'testscheduletype':", "self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if", "inspect import os, json #import requests import decimal, datetime from .dbconnect import engine,", "{ 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype']", "#engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+')", "i in range(0, len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'],", "patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(patientList))", "open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(userList)) : data2 =json.dumps( {", "dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as", ": data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }],", "'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] },", "testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList elif", 
"'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ {", "transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat']", "{ 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex':", "len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password']", "['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT", "= self.dbtable if dbtable not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available", "'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}'", "'w+') as file: for i in range(0, len(patientList)) : data2 = json.dumps( {", "engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args)", "patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname':", "elif dbtable == 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:", "userList[i]['designation'], 'userDetails' :[{ 
'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber'", "#import requests import decimal, datetime from .dbconnect import engine, alchemyencoder #from .dbconnect import", "= json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(patientList)) :", "'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2", "'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal':", "patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'],", "testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2)", ": userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{", "range(0, len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'],", "dbtable == 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for", "patientList[i]['patientCorporateState'], 
'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2)", "'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed':", "os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable not in ['patients', 'labtests',", "'w+') as file: file.write(dataList) file.close() if dbtable == 'patients': patientList = json.loads(dataList) with", "requests import decimal, datetime from .dbconnect import engine, alchemyencoder #from .dbconnect import engine,", "} ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total':", "as file: for i in range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'],", "} ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'],", "in range(0, len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType':", "dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i", "[ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': 
transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment':", "file.close() #print(patientList) return data2, dataList elif dbtable == 'labtests' : testList = json.loads(dataList)", "return data2, dataList elif dbtable == 'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json',", "}], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'],", "transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2)", ": data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ {", "'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority': transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] }", "'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry':", "file.close() return data2, dataList elif dbtable == 'transactions' : transactionList = json.loads(dataList) with", "'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'],", "'', 
'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth':", "#print(data2) file.write(data2) file.close() return data2, dataList elif dbtable == 'transactions' : transactionList =", "'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0,", "'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex':", "json #import requests import decimal, datetime from .dbconnect import engine, alchemyencoder #from .dbconnect", ": testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0,", "patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'],", "'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{", ") #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable == 'user' :", "'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'],", "file: for i in range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails'", "'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 
'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }]", "alchemyencoder #from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self,", "}, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable ==", "indent=2 ) #print(data2) file.write(data2) file.close() # End for statement' return data2, dataList #print(userList[0])", "patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby']", "userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'],", "if dbtable not in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in database'},", "class flatToCascadedJson(object): def __init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'):", "} ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'],", "file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable == 'labtests' : testList =", "}, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable ==", "in range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 
'loginDetails' :[{ 'username': userList[i]['email'],", "'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code'", "userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip':", ":[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle':", "*args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable =", "dataList elif dbtable == 'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as", "self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable", "'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname':", "'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'],", "dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row", "testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': 
testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return data2,", "data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics':", "file: for i in range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1,", "as file: for i in range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id':", "indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable == 'labtests'", "from .dbconnect import engine, alchemyencoder #from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__))", "'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [", "userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }],", ":[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress':", "create_engine, inspect import os, json #import requests import decimal, datetime from .dbconnect import", "'labtests', 'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT *", "'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName':", 
"'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2,", "file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable == 'user' : userList =", "from sqlalchemy import create_engine, inspect import os, json #import requests import decimal, datetime", "if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable if dbtable not in", "len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [", ": data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '',", "indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList elif dbtable == 'user'", "elif dbtable == 'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:", ".dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable, *args):", "= json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'],", "<filename>app/mysqltojson.py from sqlalchemy import create_engine, inspect import os, json #import requests import decimal,", "transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'],", "transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'],", 
"patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time']", "database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row)", "'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2)", "flatToCascadedJson(object): def __init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/')", "'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'],", "'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ]", ": [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ],", "import engine, 
alchemyencoder #from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object):", "'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close() # End for statement'", "indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList elif dbtable == 'transactions' :", "i in range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username':", "patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2", "'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress':", "'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode':", "'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice':", "= json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(userList)) :", "dbtable == 'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: 
for", ": patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus':", "'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in", "def __init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def", "[ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails'", "for i in range(0, len(userList)) : data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{", "dbtable == 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for", "'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' :", "[ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime']", "'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm':", "transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return", 
"patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'],", "'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' :", "transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'],", "}, indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList elif dbtable == 'transactions'", "'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity':", "open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(patientList)) : data2 = json.dumps(", "'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'],", "= json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file:", 
"transactionList[i]['testspriority'], 'testscheduletype': transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'],", "{ 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails'", "import os, json #import requests import decimal, datetime from .dbconnect import engine, alchemyencoder", "'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] } ], 'PatientDetails' : [ { 'CurrentpatientID':", "'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [ { 'regtype':", "transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] },", "data2 =json.dumps( { 'userID': userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation':", "len(patientList)) : data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' :", "'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 )", "json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(patientList)) : data2", "elif dbtable == 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 
'w+') as file:", "= json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[", "patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList) return", "'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade':", "patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'],", "'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount' : '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'],", "{dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4) with", "engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData],", "'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname':", "patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': 
patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'],", "with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(testList)) : data2 =", "return {'response':'Not available in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable))", ".dbconnect import engine, alchemyencoder #from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class", "for i in range(0, len(testList)) : data2 = json.dumps( { 'test_id': testList[i]['test_id'], 'testType':", "#from .dbconnect import engine, alchemyencoder pwd = os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def __init__(self, dbtable,", "== 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i", "'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0])", ": transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0,", "def reformatjson(self): dbtable = self.dbtable if dbtable not in ['patients', 'labtests', 'transactions', 'user']:", "transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed']", "userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city'", 
"'login_count': userList[i]['login_count'], 'confirmed_at': userList[i]['confirmed_at'], 'active': userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close()", "'PatientDetails' : [ { 'CurrentpatientID': transactionList[i]['CurrentpatientID'], 'fullName': transactionList[i]['fullName'], 'sex': transactionList[i]['sex'], 'billto': transactionList[i]['billto'], 'testspriority':", "userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at':", "'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in", "os, json #import requests import decimal, datetime from .dbconnect import engine, alchemyencoder #from", "patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close()", "= json.dumps( { 'test_id': testList[i]['test_id'], 'testType': testList[i]['testType'], 'testBottleType': testList[i]['testBottleType'], 'testName': testList[i]['testName'], 'testmnemonics': testList[i]['testmnemonics'],", "open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable == 'patients': patientList = json.loads(dataList)", "patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ]", "data2, dataList elif dbtable == 'user' : userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+')", "'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 
'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' :", ": '', 'PatientPersonalDetails' :[ { 'patientSex': patientList[i]['patientSex'], 'patientStatus': patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'],", "'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'], 'patientMiddlename': patientList[i]['patientMiddlename'], 'patientEmail': patientList[i]['patientEmail'], 'patientAltEmail':", "'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby':", "json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(transactionList)) : data2", "'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 )", "patientList[i]['patientEmail'], 'patientAltEmail': patientList[i]['patientAltEmail'], 'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'],", "'patientCorporatewhatsappnumber': 
patientList[i]['patientCorporatewhatsappnumber'], 'patientCorporateAddress': patientList[i]['patientCorporateAddress'], 'patientCorporateCity': patientList[i]['patientCorporateCity'], 'patientCorporateState': patientList[i]['patientCorporateState'], 'patientCorporateCountry': patientList[i]['patientCorporateCountry'], 'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time':", "'paymentupdateamount'], 'paymentupdateby': transactionList[i]['paymentupdateby'], 'paymentupdateTime': transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close()", "dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable ==", "], 'PaymentPtocessor' : [ { 'regtype': transactionList[i]['regtype'], 'cashier': transactionList[i]['cashier'], 'paymentupdateamount': transactionList[i][ 'paymentupdateamount'], 'paymentupdateby':", "'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor' : [", "'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'], 'enrolment_Time': patientList[i]['enrolment_Time'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(patientList)", "'w+') as file: for i in range(0, len(userList)) : data2 =json.dumps( { 'userID':", "* FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder,", "userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }],", "transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 
'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ],", "super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable = self.dbtable", "'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat':", "in ['patients', 'labtests', 'transactions', 'user']: return {'response':'Not available in database'}, inspect(engine) dbtableData =", "for row in dbtableData], default=alchemyencoder, indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close()", "userList[i]['id'], 'loginDetails' :[{ 'username': userList[i]['email'], 'password': userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname'", "import create_engine, inspect import os, json #import requests import decimal, datetime from .dbconnect", "testList[i]['testmnemonics'], 'testDetails': testList[i]['testDetails'], 'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close()", "dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not os.path.exists(f'{pwd}/table_json/'): os.makedirs(f'{pwd}/table_json/') def reformatjson(self): dbtable", "'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'], 'invoicetat': transactionList[i]['invoicetat'] }", ": userList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in 
range(0,", "'payment': transactionList[i]['payment'], 'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] }", "= engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in", "return data2, dataList elif dbtable == 'transactions' : transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json',", "], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'], 'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'], 'patientCorporateEmail': patientList[i]['patientCorporateEmail'], 'patientCorporatePhone': patientList[i]['patientCorporatePhone'], 'patientCorporatewhatsappnumber':", "userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' : userList[i]['zip_code'] }], 'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at':", "'testTAT': testList[i]['testTAT'], 'testPrice': testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList", "'patientPhonenumber': patientList[i]['patientPhonenumber'], 'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'], 'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'], 'patientAddress': patientList[i]['patientAddress'], 'patientCity': patientList[i]['patientCity'], 'patientState': patientList[i]['patientState'], 'patientCountry':", "open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(transactionList)) : data2 = json.dumps(", "'referenceOrchange': transactionList[i]['referenceOrchange'], 'sessionconfirm': transactionList[i]['sessionconfirm'], 'paymentconfirm': transactionList[i]['paymentconfirm'], 'barcode': 
transactionList[i]['barcode'], 'phlebotomy_processed': transactionList[i]['phlebotomy_processed'] } ], 'PaymentPtocessor'", "data2, dataList elif dbtable == 'labtests' : testList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+')", "indent=4) with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file: file.write(dataList) file.close() if dbtable == 'patients': patientList", ":[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'],", "'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'], 'total': transactionList[i]['total'], 'paymentmethod': transactionList[i]['paymentmethod'], 'payment': transactionList[i]['payment'], 'referenceOrchange':", "patientList[i]['patientStatus'], 'patientType': patientList[i]['patientType'], 'ageGrade': patientList[i]['ageGrade'], 'patientDateofBirth': patientList[i]['patientDateofBirth'], 'patientTitle': patientList[i]['patientTitle'], 'patientFirstname': patientList[i]['patientFirstname'], 'patientLastname': patientList[i]['patientLastname'],", "file.close() if dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:", "reformatjson(self): dbtable = self.dbtable if dbtable not in ['patients', 'labtests', 'transactions', 'user']: return", "transactionList[i]['testscheduletype'] } ], 'Payment_Reference' : [ { 'subtotal': transactionList[i]['subtotal'], 'discount': transactionList[i]['discount'], 'equalltax': transactionList[i]['equalltax'],", "userList[i]['active'] }] }, indent=2 ) #print(data2) file.write(data2) file.close() # End for statement' return", "}, indent=2 ) #print(data2) file.write(data2) file.close() # End for statement' return data2, dataList", "1, 'transactTime': transactionList[i]['transactTime'], 
'labSessionTestDetails' : [ { 'invoicemnemonics': transactionList[i]['invoicemnemonics'], 'invoicetestname': transactionList[i]['invoicetestname'], 'invoiceprice': transactionList[i]['invoiceprice'],", "in range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails'", "file.close() #print(transactionList[0]) return data2, dataList elif dbtable == 'user' : userList = json.loads(dataList)", "range(0, len(transactionList)) : data2 = json.dumps( { 'transaction_id': 1, 'transactTime': transactionList[i]['transactTime'], 'labSessionTestDetails' :", "file: file.write(dataList) file.close() if dbtable == 'patients': patientList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+')", "transactionList[i]['paymentupdateTime'] } ] }, indent=2 ) #print(data2) file.write(data2) file.close() #print(transactionList[0]) return data2, dataList", "FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4)", "testList[i]['testPrice'] }, indent=2 ) #print(data2) file.write(data2) file.close() return data2, dataList elif dbtable ==", "transactionList = json.loads(dataList) with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file: for i in range(0, len(transactionList))", "in range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'],", "userList[i]['email'], 'phonenumber': userList[i]['phonenumber'], 'AlternatePhonenumber' : userList[i]['altnumber'], 'location' :[{ 'location': userList[i]['location'], 'city' : userList[i]['city'],", "range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id'], 'patient_unique_ID': patientList[i]['patientID'], 'labsessioncount'", "os.path.dirname(os.path.abspath(__file__)) class flatToCascadedJson(object): def 
__init__(self, dbtable, *args): super(flatToCascadedJson, self).__init__(*args) self.dbtable =dbtable if not", ":[{ 'location': userList[i]['location'], 'city' : userList[i]['city'], 'state': userList[i]['state'], 'country': userList[i]['country'] }], 'zip_code' :", "#print(data2) file.write(data2) file.close() #print(patientList) return data2, dataList elif dbtable == 'labtests' : testList", "'patientState': patientList[i]['patientState'], 'patientCountry': patientList[i]['patientCountry'], 'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby'] } ], 'PatientCorporateDetails' :[ { 'patientCompanyname': patientList[i]['patientCompanyname'],", "userList[i]['password'] }], 'designation': userList[i]['designation'], 'userDetails' :[{ 'firstname' : userList[i]['firstname'], 'lastname': userList[i]['lastname'], 'email': userList[i]['email'],", "available in database'}, inspect(engine) dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable)) #engine.dispose() dataList", "'Analytics' :[{ 'last_login_at': userList[i]['last_login_at'], 'current_login_at': userList[i]['current_login_at'], 'last_login_ip': userList[i]['last_login_ip'], 'current_login_ip': userList[i]['current_login_ip'], 'login_count': userList[i]['login_count'], 'confirmed_at':", "for i in range(0, len(patientList)) : data2 = json.dumps( { 'patient_row_id' : patientList[i]['patient_id']," ]
[ "= deque() for x in range(n): if x not in neighbor: return False", "neighbor[x] -= 1 queue.append(x) count = 0 while queue: node = queue.popleft() count", "node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue) == 0:", "up a valid tree. 样例 Example 1: Input: n = 5 edges =", "and a list of undirected edges (each edge is a pair of nodes),", "and thus will not appear together in edges. ''' from collections import defaultdict,deque", "= 1 else: neighbor[edge[0]] += 1 if edge[1] not in neighbor: neighbor[edge[1]] =", "if len(queue) == 0: for key in neighbor: if neighbor[key] == 1 or", "注意事项 You can assume that no duplicate edges will appear in edges. Since", "make up a valid tree. 样例 Example 1: Input: n = 5 edges", "edges): # write your code here if len(edges) != n - 1: return", "if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue) ==", "len(queue) == 0: for key in neighbor: if neighbor[key] == 1 or neighbor[key]", "in neighbor: if neighbor[key] == 1 or neighbor[key] == 0: queue.append(key) if count", "Output: true. Example 2: Input: n = 5 edges = [[0, 1], [1,", "= 0 while queue: node = queue.popleft() count += 1 for edge in", "is a pair of nodes), write a function to check whether these edges", "a valid tree, or false \"\"\" def validTree(self, n, edges): # write your", "1 if edge[1] not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1", "2], [2, 3], [1, 3], [1, 4]] Output: false. 注意事项 You can assume", "to check whether these edges make up a valid tree. 样例 Example 1:", "\"\"\" @param n: An integer @param edges: a list of undirected edges @return:", "a pair of nodes), write a function to check whether these edges make", "in edges. Since all edges are undirected, [0, 1] is the same as", "0] and thus will not appear together in edges. 
''' from collections import", "neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue = deque() for x", "all edges are undirected, [0, 1] is the same as [1, 0] and", "defaultdict,deque class Solution: \"\"\" @param n: An integer @param edges: a list of", "if it's a valid tree, or false \"\"\" def validTree(self, n, edges): #", "code here if len(edges) != n - 1: return False if len(edges) ==", "integer @param edges: a list of undirected edges @return: true if it's a", "1 or neighbor[key] == 0: queue.append(key) if count < n: return False return", "- 1 and a list of undirected edges (each edge is a pair", "edges. ''' from collections import defaultdict,deque class Solution: \"\"\" @param n: An integer", "0 to n - 1 and a list of undirected edges (each edge", "class Solution: \"\"\" @param n: An integer @param edges: a list of undirected", "neighbor: if neighbor[key] == 1 or neighbor[key] == 0: queue.append(key) if count <", "1], [1, 2], [2, 3], [1, 3], [1, 4]] Output: false. 注意事项 You", "for edge in edges: if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -=", "neighbor[edge[1]] += 1 queue = deque() for x in range(n): if x not", "function to check whether these edges make up a valid tree. 样例 Example", "''' Given n nodes labeled from 0 to n - 1 and a", "undirected edges (each edge is a pair of nodes), write a function to", "false. 注意事项 You can assume that no duplicate edges will appear in edges.", "edges. Since all edges are undirected, [0, 1] is the same as [1,", "0 while queue: node = queue.popleft() count += 1 for edge in edges:", "appear together in edges. ''' from collections import defaultdict,deque class Solution: \"\"\" @param", "if x not in neighbor: return False elif neighbor[x] == 1: neighbor[x] -=", "@param n: An integer @param edges: a list of undirected edges @return: true", "collections import defaultdict,deque class Solution: \"\"\" @param n: An integer @param edges: a", "= [[0, 1], [0, 2], [0, 3], [1, 4]] Output: true. 
Example 2:", "[1, 2], [2, 3], [1, 3], [1, 4]] Output: false. 注意事项 You can", "Input: n = 5 edges = [[0, 1], [1, 2], [2, 3], [1,", "neighbor[edge[1]] -= 1 if len(queue) == 0: for key in neighbor: if neighbor[key]", "deque() for x in range(n): if x not in neighbor: return False elif", "neighbor = defaultdict() for edge in edges: if edge[0] not in neighbor: neighbor[edge[0]]", "from 0 to n - 1 and a list of undirected edges (each", "a function to check whether these edges make up a valid tree. 样例", "assume that no duplicate edges will appear in edges. Since all edges are", "of undirected edges @return: true if it's a valid tree, or false \"\"\"", "not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if edge[1] not", "in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if edge[1] not in", "[[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]] Output: false. 注意事项", "edges make up a valid tree. 样例 Example 1: Input: n = 5", "edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]] Output:", "write your code here if len(edges) != n - 1: return False if", "= 5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]] Output:", "whether these edges make up a valid tree. 
样例 Example 1: Input: n", "neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if edge[1] not in neighbor: neighbor[edge[1]]", "1 else: neighbor[edge[1]] += 1 queue = deque() for x in range(n): if", "edge[0] not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if edge[1]", "edges: if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue)", "queue.append(x) count = 0 while queue: node = queue.popleft() count += 1 for", "1 for edge in edges: if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]]", "neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue = deque() for x in", "not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue = deque()", "return False elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count = 0", "1: Input: n = 5 edges = [[0, 1], [0, 2], [0, 3],", "样例 Example 1: Input: n = 5 edges = [[0, 1], [0, 2],", "An integer @param edges: a list of undirected edges @return: true if it's", "if edge[1] not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue", "n: An integer @param edges: a list of undirected edges @return: true if", "1 queue.append(x) count = 0 while queue: node = queue.popleft() count += 1", "-= 1 if len(queue) == 0: for key in neighbor: if neighbor[key] ==", "labeled from 0 to n - 1 and a list of undirected edges", "5 edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]", "true. Example 2: Input: n = 5 edges = [[0, 1], [1, 2],", "= [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]] Output: false.", "\"\"\" def validTree(self, n, edges): # write your code here if len(edges) !=", "to n - 1 and a list of undirected edges (each edge is", "[0, 2], [0, 3], [1, 4]] Output: true. Example 2: Input: n =", "[1, 3], [1, 4]] Output: false. 注意事项 You can assume that no duplicate", "edges = [[0, 1], [0, 2], [0, 3], [1, 4]] Output: true. Example", "4]] Output: false. 
注意事项 You can assume that no duplicate edges will appear", "edge in edges: if edge[0] not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]]", "if len(edges) != n - 1: return False if len(edges) == 0: return", "[0, 1] is the same as [1, 0] and thus will not appear", "[2, 3], [1, 3], [1, 4]] Output: false. 注意事项 You can assume that", "1: neighbor[x] -= 1 queue.append(x) count = 0 while queue: node = queue.popleft()", "as [1, 0] and thus will not appear together in edges. ''' from", "edges: a list of undirected edges @return: true if it's a valid tree,", "nodes), write a function to check whether these edges make up a valid", "write a function to check whether these edges make up a valid tree.", "a valid tree. 样例 Example 1: Input: n = 5 edges = [[0,", "[[0, 1], [0, 2], [0, 3], [1, 4]] Output: true. Example 2: Input:", "edge is a pair of nodes), write a function to check whether these", "duplicate edges will appear in edges. Since all edges are undirected, [0, 1]", "neighbor[key] == 1 or neighbor[key] == 0: queue.append(key) if count < n: return", "in edges: if edge[0] not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] +=", "0: return n == 1 neighbor = defaultdict() for edge in edges: if", "@return: true if it's a valid tree, or false \"\"\" def validTree(self, n,", "in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue = deque() for", "3], [1, 4]] Output: false. 
注意事项 You can assume that no duplicate edges", "for key in neighbor: if neighbor[key] == 1 or neighbor[key] == 0: queue.append(key)", "2: Input: n = 5 edges = [[0, 1], [1, 2], [2, 3],", "here if len(edges) != n - 1: return False if len(edges) == 0:", "n - 1: return False if len(edges) == 0: return n == 1", "Since all edges are undirected, [0, 1] is the same as [1, 0]", "defaultdict() for edge in edges: if edge[0] not in neighbor: neighbor[edge[0]] = 1", "(each edge is a pair of nodes), write a function to check whether", "1 and a list of undirected edges (each edge is a pair of", "neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count = 0 while queue: node", "not in neighbor: return False elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x)", "will not appear together in edges. ''' from collections import defaultdict,deque class Solution:", "if edge[0] not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if", "n nodes labeled from 0 to n - 1 and a list of", "your code here if len(edges) != n - 1: return False if len(edges)", "queue.popleft() count += 1 for edge in edges: if node in edge: neighbor[edge[0]]", "not appear together in edges. ''' from collections import defaultdict,deque class Solution: \"\"\"", "!= n - 1: return False if len(edges) == 0: return n ==", "1], [0, 2], [0, 3], [1, 4]] Output: true. 
Example 2: Input: n", "true if it's a valid tree, or false \"\"\" def validTree(self, n, edges):", "Example 2: Input: n = 5 edges = [[0, 1], [1, 2], [2,", "1 queue = deque() for x in range(n): if x not in neighbor:", "neighbor[edge[0]] += 1 if edge[1] not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]]", "edges @return: true if it's a valid tree, or false \"\"\" def validTree(self,", "= defaultdict() for edge in edges: if edge[0] not in neighbor: neighbor[edge[0]] =", "in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue) == 0: for", "queue = deque() for x in range(n): if x not in neighbor: return", "else: neighbor[edge[1]] += 1 queue = deque() for x in range(n): if x", "in range(n): if x not in neighbor: return False elif neighbor[x] == 1:", "== 1 or neighbor[key] == 0: queue.append(key) if count < n: return False", "neighbor: return False elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count =", "neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1 if edge[1] not in neighbor:", "@param edges: a list of undirected edges @return: true if it's a valid", "in neighbor: return False elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count", "else: neighbor[edge[0]] += 1 if edge[1] not in neighbor: neighbor[edge[1]] = 1 else:", "a list of undirected edges @return: true if it's a valid tree, or", "return False if len(edges) == 0: return n == 1 neighbor = defaultdict()", "list of undirected edges @return: true if it's a valid tree, or false", "n - 1 and a list of undirected edges (each edge is a", "False if len(edges) == 0: return n == 1 neighbor = defaultdict() for", "edge in edges: if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1", "2], [0, 3], [1, 4]] Output: true. Example 2: Input: n = 5", "appear in edges. Since all edges are undirected, [0, 1] is the same", "Example 1: Input: n = 5 edges = [[0, 1], [0, 2], [0,", "[1, 0] and thus will not appear together in edges. 
''' from collections", "+= 1 queue = deque() for x in range(n): if x not in", "Input: n = 5 edges = [[0, 1], [0, 2], [0, 3], [1,", "neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue) == 0: for key in", "can assume that no duplicate edges will appear in edges. Since all edges", "return n == 1 neighbor = defaultdict() for edge in edges: if edge[0]", "if neighbor[key] == 1 or neighbor[key] == 0: queue.append(key) if count < n:", "a list of undirected edges (each edge is a pair of nodes), write", "together in edges. ''' from collections import defaultdict,deque class Solution: \"\"\" @param n:", "in edges. ''' from collections import defaultdict,deque class Solution: \"\"\" @param n: An", "of undirected edges (each edge is a pair of nodes), write a function", "that no duplicate edges will appear in edges. Since all edges are undirected,", "import defaultdict,deque class Solution: \"\"\" @param n: An integer @param edges: a list", "same as [1, 0] and thus will not appear together in edges. '''", "+= 1 for edge in edges: if node in edge: neighbor[edge[0]] -= 1", "Output: false. 注意事项 You can assume that no duplicate edges will appear in", "will appear in edges. Since all edges are undirected, [0, 1] is the", "thus will not appear together in edges. ''' from collections import defaultdict,deque class", "while queue: node = queue.popleft() count += 1 for edge in edges: if", "edges will appear in edges. Since all edges are undirected, [0, 1] is", "it's a valid tree, or false \"\"\" def validTree(self, n, edges): # write", "of nodes), write a function to check whether these edges make up a", "4]] Output: true. 
Example 2: Input: n = 5 edges = [[0, 1],", "x in range(n): if x not in neighbor: return False elif neighbor[x] ==", "= queue.popleft() count += 1 for edge in edges: if node in edge:", "edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if len(queue) == 0: for key", "1] is the same as [1, 0] and thus will not appear together", "list of undirected edges (each edge is a pair of nodes), write a", "range(n): if x not in neighbor: return False elif neighbor[x] == 1: neighbor[x]", "n == 1 neighbor = defaultdict() for edge in edges: if edge[0] not", "edges (each edge is a pair of nodes), write a function to check", "for edge in edges: if edge[0] not in neighbor: neighbor[edge[0]] = 1 else:", "pair of nodes), write a function to check whether these edges make up", "n = 5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]]", "def validTree(self, n, edges): # write your code here if len(edges) != n", "# write your code here if len(edges) != n - 1: return False", "-= 1 neighbor[edge[1]] -= 1 if len(queue) == 0: for key in neighbor:", "You can assume that no duplicate edges will appear in edges. Since all", "1 neighbor[edge[1]] -= 1 if len(queue) == 0: for key in neighbor: if", "0: for key in neighbor: if neighbor[key] == 1 or neighbor[key] == 0:", "are undirected, [0, 1] is the same as [1, 0] and thus will", "validTree(self, n, edges): # write your code here if len(edges) != n -", "== 1: neighbor[x] -= 1 queue.append(x) count = 0 while queue: node =", "- 1: return False if len(edges) == 0: return n == 1 neighbor", "count += 1 for edge in edges: if node in edge: neighbor[edge[0]] -=", "valid tree. 样例 Example 1: Input: n = 5 edges = [[0, 1],", "tree, or false \"\"\" def validTree(self, n, edges): # write your code here", "[1, 4]] Output: false. 注意事项 You can assume that no duplicate edges will", "= 5 edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1,", "n = 5 edges = [[0, 1], [1, 2], [2, 3], [1, 3],", "check whether these edges make up a valid tree. 
样例 Example 1: Input:", "elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count = 0 while queue:", "node = queue.popleft() count += 1 for edge in edges: if node in", "[0, 3], [1, 4]] Output: true. Example 2: Input: n = 5 edges", "= 1 else: neighbor[edge[1]] += 1 queue = deque() for x in range(n):", "key in neighbor: if neighbor[key] == 1 or neighbor[key] == 0: queue.append(key) if", "3], [1, 4]] Output: true. Example 2: Input: n = 5 edges =", "tree. 样例 Example 1: Input: n = 5 edges = [[0, 1], [0,", "x not in neighbor: return False elif neighbor[x] == 1: neighbor[x] -= 1", "len(edges) == 0: return n == 1 neighbor = defaultdict() for edge in", "1: return False if len(edges) == 0: return n == 1 neighbor =", "[1, 4]] Output: true. Example 2: Input: n = 5 edges = [[0,", "for x in range(n): if x not in neighbor: return False elif neighbor[x]", "5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]] Output: true.", "edges are undirected, [0, 1] is the same as [1, 0] and thus", "if len(edges) == 0: return n == 1 neighbor = defaultdict() for edge", "-= 1 queue.append(x) count = 0 while queue: node = queue.popleft() count +=", "is the same as [1, 0] and thus will not appear together in", "valid tree, or false \"\"\" def validTree(self, n, edges): # write your code", "n, edges): # write your code here if len(edges) != n - 1:", "== 0: return n == 1 neighbor = defaultdict() for edge in edges:", "count = 0 while queue: node = queue.popleft() count += 1 for edge", "1 neighbor = defaultdict() for edge in edges: if edge[0] not in neighbor:", "1 else: neighbor[edge[0]] += 1 if edge[1] not in neighbor: neighbor[edge[1]] = 1", "or neighbor[key] == 0: queue.append(key) if count < n: return False return True", "or false \"\"\" def validTree(self, n, edges): # write your code here if", "from collections import defaultdict,deque class Solution: \"\"\" @param n: An integer @param edges:", "''' from collections import defaultdict,deque class Solution: \"\"\" @param n: An integer @param", "== 1 
neighbor = defaultdict() for edge in edges: if edge[0] not in", "edge[1] not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] += 1 queue =", "nodes labeled from 0 to n - 1 and a list of undirected", "undirected edges @return: true if it's a valid tree, or false \"\"\" def", "3], [1, 3], [1, 4]] Output: false. 注意事项 You can assume that no", "no duplicate edges will appear in edges. Since all edges are undirected, [0,", "len(edges) != n - 1: return False if len(edges) == 0: return n", "in edges: if node in edge: neighbor[edge[0]] -= 1 neighbor[edge[1]] -= 1 if", "1 if len(queue) == 0: for key in neighbor: if neighbor[key] == 1", "+= 1 if edge[1] not in neighbor: neighbor[edge[1]] = 1 else: neighbor[edge[1]] +=", "queue: node = queue.popleft() count += 1 for edge in edges: if node", "undirected, [0, 1] is the same as [1, 0] and thus will not", "these edges make up a valid tree. 样例 Example 1: Input: n =", "the same as [1, 0] and thus will not appear together in edges.", "== 0: for key in neighbor: if neighbor[key] == 1 or neighbor[key] ==", "false \"\"\" def validTree(self, n, edges): # write your code here if len(edges)", "False elif neighbor[x] == 1: neighbor[x] -= 1 queue.append(x) count = 0 while", "edges: if edge[0] not in neighbor: neighbor[edge[0]] = 1 else: neighbor[edge[0]] += 1", "Given n nodes labeled from 0 to n - 1 and a list", "Solution: \"\"\" @param n: An integer @param edges: a list of undirected edges" ]
[ "from .schemas import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from .", "= (0, 0, 6) __version__ = \".\".join([str(v) for v in version_info]) __author__ =", "from .config import ConnectionConfig from .schemas import ( Message as Message, MultipartSubtypeEnum as", ".config import ConnectionConfig from .schemas import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum", "Mail from .config import ConnectionConfig from .schemas import ( Message as Message, MultipartSubtypeEnum", ".mail import Mail from .config import ConnectionConfig from .schemas import ( Message as", "in version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\", \"ConnectionConfig\", \"Message\", \"utils\", \"MultipartSubtypeEnum\"", "import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import utils", "Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import utils version_info =", "as MultipartSubtypeEnum ) from . import utils version_info = (0, 0, 6) __version__", "(0, 0, 6) __version__ = \".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\"", "MultipartSubtypeEnum ) from . import utils version_info = (0, 0, 6) __version__ =", "import Mail from .config import ConnectionConfig from .schemas import ( Message as Message,", "( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import utils version_info", "from . import utils version_info = (0, 0, 6) __version__ = \".\".join([str(v) for", "6) __version__ = \".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\" __all__ =", "import utils version_info = (0, 0, 6) __version__ = \".\".join([str(v) for v in", ". import utils version_info = (0, 0, 6) __version__ = \".\".join([str(v) for v", "MultipartSubtypeEnum as MultipartSubtypeEnum ) from . 
import utils version_info = (0, 0, 6)", "ConnectionConfig from .schemas import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from", "version_info = (0, 0, 6) __version__ = \".\".join([str(v) for v in version_info]) __author__", ".schemas import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import", "as Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import utils version_info = (0,", "__version__ = \".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\" __all__ = [", "import ConnectionConfig from .schemas import ( Message as Message, MultipartSubtypeEnum as MultipartSubtypeEnum )", "\".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\", \"ConnectionConfig\",", ") from . import utils version_info = (0, 0, 6) __version__ = \".\".join([str(v)", "for v in version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\", \"ConnectionConfig\", \"Message\",", "<reponame>jfkinslow/flask-mailing from .mail import Mail from .config import ConnectionConfig from .schemas import (", "= \".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\",", "0, 6) __version__ = \".\".join([str(v) for v in version_info]) __author__ = \"<EMAIL>\" __all__", "v in version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\", \"ConnectionConfig\", \"Message\", \"utils\",", "from .mail import Mail from .config import ConnectionConfig from .schemas import ( Message", "utils version_info = (0, 0, 6) __version__ = \".\".join([str(v) for v in version_info])", "Message, MultipartSubtypeEnum as MultipartSubtypeEnum ) from . import utils version_info = (0, 0,", "version_info]) __author__ = \"<EMAIL>\" __all__ = [ \"Mail\", \"ConnectionConfig\", \"Message\", \"utils\", \"MultipartSubtypeEnum\" ]" ]
[ "in authorSet: print(auth) for row in list(filtered): if row[2] == auth: print(tab +", "year1 or year2 might still be None, which is fine. return parsed_arguments def", "line arguments. with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser =", "of the substrings contains a space, surround that substring\" \" with quotes \\\"\\\".\")", "\", \" + row[1]) # Otherwise, print all book/author/year information in \"filtered\". else:", "searching the 'books.csv' file. ''' import csv import argparse def get_parsed_arguments(): # Set", "in the time \" \"interval [min(year1, year2), max(year1, year2)] \" \"within which to", "getAuthorSet(filtered, authors) -> set: authorSet = set() if authors: for row in filtered:", "arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet =", "A command line interface for searching the 'books.csv' file. ''' import csv import", "sub in authors), filtered)) def filterYears(filtered, year1, year2) -> list: return list(filter(lambda p:", "nargs=\"+\", help=\"One or more substrings to search for in the titles of books.", "or more substrings to search for in the names of authors. If one", "arguments. with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description", "<NAME> A command line interface for searching the 'books.csv' file. ''' import csv", "-> list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors), filtered))", "filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered =", "search for in the titles of books. 
\" \"If one of the substrings", "prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read())", "+ \", \" + row[1]) # Otherwise, print all book/author/year information in \"filtered\".", "books.\") # Parse the command line. parsed_arguments = parser.parse_args() # Handle the years.", "substrings to search for in the names of authors. If one of the", "is None: parsed_arguments.year2 = year1 # Note that year1 or year2 might still", "= filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors)", "\"--authors\", nargs=\"+\", help=\"One or more substrings to search for in the names of", "else: for row in filtered: print(row[0] + \", \" + row[1] + \",", "cs257 Revised by <NAME> A command line interface for searching the 'books.csv' file.", "def get_parsed_arguments(): # Set up command line arguments. with open(\"prolog.txt\", \"r\") as prolog,", "arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if", "= filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print", "list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books), filtered)) def filterAuthors(filtered, authors)", "authors) -> list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors),", "arguments from the command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) #", "fine. return parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower() in", "in filtered: authorSet.add(row[2]) return authorSet def main(): # Get arguments from the command", "Set up command line arguments. 
with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as", "year2), max(year1, year2)] \" \"within which to search for books.\") # Parse the", "return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors), filtered)) def filterYears(filtered,", "or authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered =", "their books. if authorSet != set(): tab = \" \" * 4 for", "\" + row[1]) # Otherwise, print all book/author/year information in \"filtered\". else: for", "p[2].lower() for sub in authors), filtered)) def filterYears(filtered, year1, year2) -> list: return", "\" \"within which to search for books.\") # Parse the command line. parsed_arguments", "Written by <NAME> and <NAME> for cs257 Revised by <NAME> A command line", "authors. If one of the substrings contains \" \"a space, surround that substring", "command line. parsed_arguments = parser.parse_args() # Handle the years. year1 = parsed_arguments.year1 if", "return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books), filtered)) def filterAuthors(filtered,", "surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more", "for cs257 Revised by <NAME> A command line interface for searching the 'books.csv'", "books. if authorSet != set(): tab = \" \" * 4 for auth", "line. parsed_arguments = parser.parse_args() # Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2", "for sub in authors), filtered)) def filterYears(filtered, year1, year2) -> list: return list(filter(lambda", "if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet", "arguments.authors) # If authorSet is nonempty, print authors and their books. if authorSet", "names of authors. 
If one of the substrings contains \" \"a space, surround", "[min(year1, year2), max(year1, year2)] \" \"within which to search for books.\") # Parse", "authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered,", "in p[2].lower() for sub in authors), filtered)) def filterYears(filtered, year1, year2) -> list:", "surround that substring with quotes \\\"\\\".\") # may need to fix, see python3", "more substrings to search for in the titles of books. \" \"If one", "parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years in the time \" \"interval", "one of the substrings contains \" \"a space, surround that substring with quotes", "\\\"\\\".\") # may need to fix, see python3 books.py books.csv -b 'the' 1800", "1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years in", "authors: for row in filtered: authorSet.add(row[2]) return authorSet def main(): # Get arguments", "the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 #", "parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to search for in the names", "for in the names of authors. If one of the substrings contains \"", "as prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog =", "in list(filtered): if row[2] == auth: print(tab + row[0] + \", \" +", "authorSet def main(): # Get arguments from the command line. 
arguments = get_parsed_arguments()", "<NAME> and <NAME> for cs257 Revised by <NAME> A command line interface for", "for row in list(filtered): if row[2] == auth: print(tab + row[0] + \",", "filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) #", "to search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years in", "filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in", "= getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print authors and their books.", "parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note that year1 or year2 might", "books.py Written by <NAME> and <NAME> for cs257 Revised by <NAME> A command", "''' books.py Written by <NAME> and <NAME> for cs257 Revised by <NAME> A", "csv import argparse def get_parsed_arguments(): # Set up command line arguments. with open(\"prolog.txt\",", "line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by years, books,", "Otherwise, print all book/author/year information in \"filtered\". else: for row in filtered: print(row[0]", "row in filtered: print(row[0] + \", \" + row[1] + \", \" +", "\" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to search", "command line interface for searching the 'books.csv' file. 
''' import csv import argparse", "p: any(sub.lower() in p[0].lower() for sub in books), filtered)) def filterAuthors(filtered, authors) ->", "\"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\",", "arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered,", "def filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub", "of books. \" \"If one of the substrings contains a space, surround that", "books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of", "the command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by", "'the' 1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years", "any(sub.lower() in p[0].lower() for sub in books), filtered)) def filterAuthors(filtered, authors) -> list:", "from the command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter", "\"a space, surround that substring with quotes \\\"\\\".\") # may need to fix,", "print(tab + row[0] + \", \" + row[1]) # Otherwise, print all book/author/year", "to search for books.\") # Parse the command line. parsed_arguments = parser.parse_args() #", "in the titles of books. \" \"If one of the substrings contains a", "command line arguments. with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser", "auth: print(tab + row[0] + \", \" + row[1]) # Otherwise, print all", "with quotes \\\"\\\".\") # may need to fix, see python3 books.py books.csv -b", "auth in authorSet: print(auth) for row in list(filtered): if row[2] == auth: print(tab", "print all book/author/year information in \"filtered\". 
else: for row in filtered: print(row[0] +", "with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description =", "parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to search for in the titles", "that substring with quotes \\\"\\\".\") # may need to fix, see python3 books.py", "getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print authors and their books. if", "epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to search for", "list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books), filtered)) def", "get_parsed_arguments(): # Set up command line arguments. with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\",", "list(filtered): if row[2] == auth: print(tab + row[0] + \", \" + row[1])", "for in the titles of books. \" \"If one of the substrings contains", "max(year1, year2)] \" \"within which to search for books.\") # Parse the command", "-> list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books), filtered))", "\" \"within which to search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of", "command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by years,", "by years, books, or authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if", "None, which is fine. 
return parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda", "example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years in the time \"", "if row[2] == auth: print(tab + row[0] + \", \" + row[1]) #", "\" \"a space, surround that substring with quotes \\\"\\\".\") # may need to", "arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If", "years in the time \" \"interval [min(year1, year2), max(year1, year2)] \" \"within which", "= parser.parse_args() # Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None:", "be None, which is fine. return parsed_arguments def filterBooks(filtered, books) -> list: return", "list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors), filtered)) def filterYears(filtered, year1,", "or more substrings to search for in the titles of books. \" \"If", "\\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to search for in the", "books.py books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One", "authors and their books. if authorSet != set(): tab = \" \" *", "main(): # Get arguments from the command line. arguments = get_parsed_arguments() filtered =", "# Set up command line arguments. 
with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\")", "search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years in the", "year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet = set() if", "parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or", "p: any(sub.lower() in p[2].lower() for sub in authors), filtered)) def filterYears(filtered, year1, year2)", "print(row[0] + \", \" + row[1] + \", \" + row[2]) if __name__", "return list(filter(lambda p: year1 <= p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered,", "filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in", "import csv import argparse def get_parsed_arguments(): # Set up command line arguments. with", "if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet", "\"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog", "for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years in the time", "epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to search for in the", "which to search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years", "parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for", "= epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to search for in", "the substrings contains a space, surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\",", "interface for searching the 'books.csv' file. 
''' import csv import argparse def get_parsed_arguments():", "if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note that year1 or year2", "'books.csv' file. ''' import csv import argparse def get_parsed_arguments(): # Set up command", "<= p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet", "and their books. if authorSet != set(): tab = \" \" * 4", "titles of books. \" \"If one of the substrings contains a space, surround", "\"?\", help=\"One of the years in the time \" \"interval [min(year1, year2), max(year1,", "year1, year2) -> list: return list(filter(lambda p: year1 <= p[1] and year2 >=", "row[2] == auth: print(tab + row[0] + \", \" + row[1]) # Otherwise,", "# Filter by years, books, or authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1,", "year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note that", "all book/author/year information in \"filtered\". else: for row in filtered: print(row[0] + \",", "see python3 books.py books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs =", "# Get arguments from the command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv',", "= parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note that year1", "substrings to search for in the titles of books. \" \"If one of", "Parse the command line. parsed_arguments = parser.parse_args() # Handle the years. year1 =", "arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is", "return parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower() in p[0].lower()", "Revised by <NAME> A command line interface for searching the 'books.csv' file. 
'''", "need to fix, see python3 books.py books.csv -b 'the' 1800 1899 for example", "which to search for books.\") # Parse the command line. parsed_arguments = parser.parse_args()", "authorSet != set(): tab = \" \" * 4 for auth in authorSet:", "year2)] \" \"within which to search for books.\") # Parse the command line.", "+ \", \" + row[1] + \", \" + row[2]) if __name__ ==", "that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings", "\"interval [min(year1, year2), max(year1, year2)] \" \"within which to search for books.\") #", "python3 books.py books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\",", "= get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by years, books, or authors.", "set() if authors: for row in filtered: authorSet.add(row[2]) return authorSet def main(): #", "nargs=\"+\", help=\"One or more substrings to search for in the names of authors.", "substrings contains a space, surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\",", "# may need to fix, see python3 books.py books.csv -b 'the' 1800 1899", "# Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 =", "open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(),", "Filter by years, books, or authors. 
if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2)", "authorSet: print(auth) for row in list(filtered): if row[2] == auth: print(tab + row[0]", "epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One", "+ row[0] + \", \" + row[1]) # Otherwise, print all book/author/year information", "[min(year1, year2), max(year1, year2)] \" \"within which to search for books.\") parser.add_argument(\"year2\", nargs", "parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note that year1 or", "more substrings to search for in the names of authors. If one of", "\" \"interval [min(year1, year2), max(year1, year2)] \" \"within which to search for books.\")", "books, or authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered", "contains \" \"a space, surround that substring with quotes \\\"\\\".\") # may need", "file. ''' import csv import argparse def get_parsed_arguments(): # Set up command line", "may need to fix, see python3 books.py books.csv -b 'the' 1800 1899 for", "help=\"One or more substrings to search for in the titles of books. \"", "of the substrings contains \" \"a space, surround that substring with quotes \\\"\\\".\")", "\"within which to search for books.\") # Parse the command line. parsed_arguments =", "filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print authors", "if authors: for row in filtered: authorSet.add(row[2]) return authorSet def main(): # Get", "+ row[1]) # Otherwise, print all book/author/year information in \"filtered\". 
else: for row", "filterYears(filtered, year1, year2) -> list: return list(filter(lambda p: year1 <= p[1] and year2", "= \"?\", help=\"One of the years in the time \" \"interval [min(year1, year2),", "def getAuthorSet(filtered, authors) -> set: authorSet = set() if authors: for row in", "up command line arguments. with open(\"prolog.txt\", \"r\") as prolog, open(\"epilog.txt\", \"r\") as epilog:", "in books), filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower() in", "# Otherwise, print all book/author/year information in \"filtered\". else: for row in filtered:", "in \"filtered\". else: for row in filtered: print(row[0] + \", \" + row[1]", "return authorSet def main(): # Get arguments from the command line. arguments =", "If authorSet is nonempty, print authors and their books. if authorSet != set():", "and <NAME> for cs257 Revised by <NAME> A command line interface for searching", "search for in the names of authors. If one of the substrings contains", "Note that year1 or year2 might still be None, which is fine. return", "''' import csv import argparse def get_parsed_arguments(): # Set up command line arguments.", "-b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the", "import argparse def get_parsed_arguments(): # Set up command line arguments. with open(\"prolog.txt\", \"r\")", "for auth in authorSet: print(auth) for row in list(filtered): if row[2] == auth:", "as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\",", "argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings", "a space, surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One", "the 'books.csv' file. 
''' import csv import argparse def get_parsed_arguments(): # Set up", "row in filtered: authorSet.add(row[2]) return authorSet def main(): # Get arguments from the", "parsed_arguments.year2 = year1 # Note that year1 or year2 might still be None,", "= \" \" * 4 for auth in authorSet: print(auth) for row in", "which is fine. return parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda p:", "parser.parse_args() # Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2", "the time \" \"interval [min(year1, year2), max(year1, year2)] \" \"within which to search", "arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors)", "list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors), filtered)) def", "authors) -> set: authorSet = set() if authors: for row in filtered: authorSet.add(row[2])", "def main(): # Get arguments from the command line. arguments = get_parsed_arguments() filtered", "for searching the 'books.csv' file. 
''' import csv import argparse def get_parsed_arguments(): #", "filtered: print(row[0] + \", \" + row[1] + \", \" + row[2]) if", "the substrings contains \" \"a space, surround that substring with quotes \\\"\\\".\") #", "p: year1 <= p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) ->", "= prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to", "print(auth) for row in list(filtered): if row[2] == auth: print(tab + row[0] +", "authorSet = set() if authors: for row in filtered: authorSet.add(row[2]) return authorSet def", "def filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub", "year2)] \" \"within which to search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One", "books. \" \"If one of the substrings contains a space, surround that substring\"", "# If authorSet is nonempty, print authors and their books. if authorSet !=", "is fine. return parsed_arguments def filterBooks(filtered, books) -> list: return list(filter(lambda p: any(sub.lower()", "csv.reader(open('books.csv', 'r')) # Filter by years, books, or authors. if arguments.year1: filtered =", "def filterYears(filtered, year1, year2) -> list: return list(filter(lambda p: year1 <= p[1] and", "Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1", "to search for in the names of authors. If one of the substrings", "of authors. If one of the substrings contains \" \"a space, surround that", "the command line. parsed_arguments = parser.parse_args() # Handle the years. 
year1 = parsed_arguments.year1", "authors), filtered)) def filterYears(filtered, year1, year2) -> list: return list(filter(lambda p: year1 <=", "# Note that year1 or year2 might still be None, which is fine.", "open(\"epilog.txt\", \"r\") as epilog: parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\",", "space, surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or", "print authors and their books. if authorSet != set(): tab = \" \"", "authorSet.add(row[2]) return authorSet def main(): # Get arguments from the command line. arguments", "year2) -> list: return list(filter(lambda p: year1 <= p[1] and year2 >= p[1],", "substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to", "filtered)) def filterYears(filtered, year1, year2) -> list: return list(filter(lambda p: year1 <= p[1]", "arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print authors and", "\" \" * 4 for auth in authorSet: print(auth) for row in list(filtered):", "== auth: print(tab + row[0] + \", \" + row[1]) # Otherwise, print", "list(filter(lambda p: year1 <= p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors)", "for books.\") # Parse the command line. parsed_arguments = parser.parse_args() # Handle the", "might still be None, which is fine. return parsed_arguments def filterBooks(filtered, books) ->", "in authors), filtered)) def filterYears(filtered, year1, year2) -> list: return list(filter(lambda p: year1", "\"filtered\". else: for row in filtered: print(row[0] + \", \" + row[1] +", "'r')) # Filter by years, books, or authors. if arguments.year1: filtered = filterYears(filtered,", "authorSet is nonempty, print authors and their books. 
if authorSet != set(): tab", "-> set: authorSet = set() if authors: for row in filtered: authorSet.add(row[2]) return", "= set() if authors: for row in filtered: authorSet.add(row[2]) return authorSet def main():", "years, books, or authors. if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books:", "for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years in the time", "for sub in books), filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda p:", "sub in books), filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower()", "in p[0].lower() for sub in books), filtered)) def filterAuthors(filtered, authors) -> list: return", "one of the substrings contains a space, surround that substring\" \" with quotes", "substring with quotes \\\"\\\".\") # may need to fix, see python3 books.py books.csv", "\" \"If one of the substrings contains a space, surround that substring\" \"", "year1 # Note that year1 or year2 might still be None, which is", "4 for auth in authorSet: print(auth) for row in list(filtered): if row[2] ==", "time \" \"interval [min(year1, year2), max(year1, year2)] \" \"within which to search for", "that year1 or year2 might still be None, which is fine. return parsed_arguments", "row in list(filtered): if row[2] == auth: print(tab + row[0] + \", \"", "Get arguments from the command line. arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r'))", "= csv.reader(open('books.csv', 'r')) # Filter by years, books, or authors. 
if arguments.year1: filtered", "the years in the time \" \"interval [min(year1, year2), max(year1, year2)] \" \"within", "\" + row[1] + \", \" + row[2]) if __name__ == \"__main__\": main()", "for row in filtered: authorSet.add(row[2]) return authorSet def main(): # Get arguments from", "filtered: authorSet.add(row[2]) return authorSet def main(): # Get arguments from the command line.", "any(sub.lower() in p[2].lower() for sub in authors), filtered)) def filterYears(filtered, year1, year2) ->", "filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors:", "# Parse the command line. parsed_arguments = parser.parse_args() # Handle the years. year1", "\"--books\", nargs=\"+\", help=\"One or more substrings to search for in the titles of", "if authorSet != set(): tab = \" \" * 4 for auth in", "p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet =", "parsed_arguments = parser.parse_args() # Handle the years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is", "row[1]) # Otherwise, print all book/author/year information in \"filtered\". else: for row in", "books) -> list: return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books),", "space, surround that substring with quotes \\\"\\\".\") # may need to fix, see", "year1 <= p[1] and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) -> set:", "= filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered", "still be None, which is fine. return parsed_arguments def filterBooks(filtered, books) -> list:", "nonempty, print authors and their books. if authorSet != set(): tab = \"", "set: authorSet = set() if authors: for row in filtered: authorSet.add(row[2]) return authorSet", "line interface for searching the 'books.csv' file. 
''' import csv import argparse def", "years. year1 = parsed_arguments.year1 if parsed_arguments.year2 is None: parsed_arguments.year2 = year1 # Note", "\", \" + row[1] + \", \" + row[2]) if __name__ == \"__main__\":", "If one of the substrings contains \" \"a space, surround that substring with", "argparse def get_parsed_arguments(): # Set up command line arguments. with open(\"prolog.txt\", \"r\") as", "nargs = \"?\", help=\"One of the years in the time \" \"interval [min(year1,", "by <NAME> A command line interface for searching the 'books.csv' file. ''' import", "parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years in the time \" \"interval", "to search for in the titles of books. \" \"If one of the", "filtered)) def getAuthorSet(filtered, authors) -> set: authorSet = set() if authors: for row", "substrings contains \" \"a space, surround that substring with quotes \\\"\\\".\") # may", "with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to search for", "\"If one of the substrings contains a space, surround that substring\" \" with", "p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet = set() if authors: for", "<NAME> for cs257 Revised by <NAME> A command line interface for searching the", "by <NAME> and <NAME> for cs257 Revised by <NAME> A command line interface", "for row in filtered: print(row[0] + \", \" + row[1] + \", \"", "= argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more", "\"interval [min(year1, year2), max(year1, year2)] \" \"within which to search for books.\") parser.add_argument(\"year2\",", "\" * 4 for auth in authorSet: print(auth) for row in list(filtered): if", "to fix, see python3 books.py books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\",", "in the names of authors. 
If one of the substrings contains \" \"a", "and year2 >= p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet = set()", "help=\"One of the years in the time \" \"interval [min(year1, year2), max(year1, year2)]", "prolog.read(), epilog = epilog.read()) parser.add_argument(\"-b\", \"--books\", nargs=\"+\", help=\"One or more substrings to search", "1899 for example parser.add_argument(\"year1\", nargs = \"?\", help=\"One of the years in the", "year2), max(year1, year2)] \" \"within which to search for books.\") parser.add_argument(\"year2\", nargs =", "book/author/year information in \"filtered\". else: for row in filtered: print(row[0] + \", \"", "of the years in the time \" \"interval [min(year1, year2), max(year1, year2)] \"", "filtered = filterBooks(filtered, arguments.books) if arguments.authors: filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered,", "books), filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower() in p[2].lower()", "max(year1, year2)] \" \"within which to search for books.\") parser.add_argument(\"year2\", nargs = \"?\",", "arguments = get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by years, books, or", "if arguments.year1: filtered = filterYears(filtered, arguments.year1, arguments.year2) if arguments.books: filtered = filterBooks(filtered, arguments.books)", "!= set(): tab = \" \" * 4 for auth in authorSet: print(auth)", "\"within which to search for books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the", "help=\"One or more substrings to search for in the names of authors. If", "books.\") parser.add_argument(\"year2\", nargs = \"?\", help=\"One of the years in the time \"", "contains a space, surround that substring\" \" with quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\",", "the titles of books. 
\" \"If one of the substrings contains a space,", "filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda p: any(sub.lower() in p[2].lower() for", "fix, see python3 books.py books.csv -b 'the' 1800 1899 for example parser.add_argument(\"year1\", nargs", "authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty, print authors and their", "information in \"filtered\". else: for row in filtered: print(row[0] + \", \" +", "set(): tab = \" \" * 4 for auth in authorSet: print(auth) for", "quotes \\\"\\\".\") parser.add_argument(\"-a\", \"--authors\", nargs=\"+\", help=\"One or more substrings to search for in", "p[0].lower() for sub in books), filtered)) def filterAuthors(filtered, authors) -> list: return list(filter(lambda", ">= p[1], filtered)) def getAuthorSet(filtered, authors) -> set: authorSet = set() if authors:", "get_parsed_arguments() filtered = csv.reader(open('books.csv', 'r')) # Filter by years, books, or authors. if", "None: parsed_arguments.year2 = year1 # Note that year1 or year2 might still be", "-> list: return list(filter(lambda p: year1 <= p[1] and year2 >= p[1], filtered))", "= year1 # Note that year1 or year2 might still be None, which", "search for books.\") # Parse the command line. parsed_arguments = parser.parse_args() # Handle", "or year2 might still be None, which is fine. return parsed_arguments def filterBooks(filtered,", "year2 might still be None, which is fine. return parsed_arguments def filterBooks(filtered, books)", "row[0] + \", \" + row[1]) # Otherwise, print all book/author/year information in", "quotes \\\"\\\".\") # may need to fix, see python3 books.py books.csv -b 'the'", "in filtered: print(row[0] + \", \" + row[1] + \", \" + row[2])", "filtered = csv.reader(open('books.csv', 'r')) # Filter by years, books, or authors. if arguments.year1:", "the names of authors. 
If one of the substrings contains \" \"a space,", "tab = \" \" * 4 for auth in authorSet: print(auth) for row", "list: return list(filter(lambda p: year1 <= p[1] and year2 >= p[1], filtered)) def", "is nonempty, print authors and their books. if authorSet != set(): tab =", "* 4 for auth in authorSet: print(auth) for row in list(filtered): if row[2]", "filtered = filterAuthors(filtered, arguments.authors) authorSet = getAuthorSet(filtered, arguments.authors) # If authorSet is nonempty," ]
[ "contract.value def is_equal(self, contract): c = self[contract] return c and c.value == contract.value", "key.name: return c return None def is_lower(self, contract): c = self[contract] return c", "key): for c in self._contracts: if c.name == key.name: return c return None", "other.name and (set(self.values) & set(other.values)) def __ne__(self, other): return not (self == other)", "not (self == other) class PackageContracts: def __init__(self, contracts): self._contracts = contracts def", "contract): c = self[contract] return c and c.value < contract.value def is_equal(self, contract):", "return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def", "def __init__(self, contracts): self._contracts = contracts def __getitem__(self, key): for c in self._contracts:", "self.values) def __repr__(self): return str(self) def __eq__(self, other): return self.name == other.name and", "c in self._contracts: if c.name == key.name: return c return None def is_lower(self,", "contracts): self._contracts = contracts def __getitem__(self, key): for c in self._contracts: if c.name", "\"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self, other): return self.name == other.name", "return self.name == other.name and (set(self.values) & set(other.values)) def __ne__(self, other): return not", "other): return not (self == other) class PackageContracts: def __init__(self, contracts): self._contracts =", "other) class PackageContracts: def __init__(self, contracts): self._contracts = contracts def __getitem__(self, key): for", "__ne__(self, other): return not (self == other) class PackageContracts: def __init__(self, contracts): self._contracts", "return not (self == other) class PackageContracts: def __init__(self, contracts): self._contracts = contracts", "and c.value < contract.value def is_equal(self, contract): c = self[contract] return c and", 
"__init__(self, contracts): self._contracts = contracts def __getitem__(self, key): for c in self._contracts: if", "values def __hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self):", "return c and c.value < contract.value def is_equal(self, contract): c = self[contract] return", "< contract.value def is_equal(self, contract): c = self[contract] return c and c.value ==", "def __init__(self, name, values): self.name = name self.values = values def __hash__(self): return", "def __ne__(self, other): return not (self == other) class PackageContracts: def __init__(self, contracts):", "for c in self._contracts: if c.name == key.name: return c return None def", "in self._contracts: if c.name == key.name: return c return None def is_lower(self, contract):", "name self.values = values def __hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name,", "def __eq__(self, other): return self.name == other.name and (set(self.values) & set(other.values)) def __ne__(self,", "self.name == other.name and (set(self.values) & set(other.values)) def __ne__(self, other): return not (self", "<reponame>devopshq/crosspm2 class Contract: def __init__(self, name, values): self.name = name self.values = values", "if c.name == key.name: return c return None def is_lower(self, contract): c =", "c and c.value < contract.value def is_equal(self, contract): c = self[contract] return c", "class PackageContracts: def __init__(self, contracts): self._contracts = contracts def __getitem__(self, key): for c", "set(other.values)) def __ne__(self, other): return not (self == other) class PackageContracts: def __init__(self,", "str(self) def __eq__(self, other): return self.name == other.name and (set(self.values) & set(other.values)) def", "PackageContracts: def __init__(self, contracts): self._contracts = contracts def __getitem__(self, key): for c in", "name, values): 
self.name = name self.values = values def __hash__(self): return hash((self.name, self.values))", "= contracts def __getitem__(self, key): for c in self._contracts: if c.name == key.name:", "contracts def __getitem__(self, key): for c in self._contracts: if c.name == key.name: return", "def __hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return", "self._contracts: if c.name == key.name: return c return None def is_lower(self, contract): c", "and (set(self.values) & set(other.values)) def __ne__(self, other): return not (self == other) class", "__repr__(self): return str(self) def __eq__(self, other): return self.name == other.name and (set(self.values) &", "== other.name and (set(self.values) & set(other.values)) def __ne__(self, other): return not (self ==", "self._contracts = contracts def __getitem__(self, key): for c in self._contracts: if c.name ==", "__init__(self, name, values): self.name = name self.values = values def __hash__(self): return hash((self.name,", "= values def __hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def", "== key.name: return c return None def is_lower(self, contract): c = self[contract] return", "c.value < contract.value def is_equal(self, contract): c = self[contract] return c and c.value", "self.values = values def __hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values)", "self[contract] return c and c.value < contract.value def is_equal(self, contract): c = self[contract]", "__hash__(self): return hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self)", "c return None def is_lower(self, contract): c = self[contract] return c and c.value", "return None def is_lower(self, contract): c = self[contract] return c and c.value <", 
"values): self.name = name self.values = values def __hash__(self): return hash((self.name, self.values)) def", "Contract: def __init__(self, name, values): self.name = name self.values = values def __hash__(self):", "hash((self.name, self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self,", "def __getitem__(self, key): for c in self._contracts: if c.name == key.name: return c", "other): return self.name == other.name and (set(self.values) & set(other.values)) def __ne__(self, other): return", "__getitem__(self, key): for c in self._contracts: if c.name == key.name: return c return", "def is_lower(self, contract): c = self[contract] return c and c.value < contract.value def", "self.name = name self.values = values def __hash__(self): return hash((self.name, self.values)) def __str__(self):", "== other) class PackageContracts: def __init__(self, contracts): self._contracts = contracts def __getitem__(self, key):", "def __repr__(self): return str(self) def __eq__(self, other): return self.name == other.name and (set(self.values)", "is_lower(self, contract): c = self[contract] return c and c.value < contract.value def is_equal(self,", "__str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self, other): return self.name", "= self[contract] return c and c.value < contract.value def is_equal(self, contract): c =", "return c return None def is_lower(self, contract): c = self[contract] return c and", "None def is_lower(self, contract): c = self[contract] return c and c.value < contract.value", "= name self.values = values def __hash__(self): return hash((self.name, self.values)) def __str__(self): return", "c = self[contract] return c and c.value < contract.value def is_equal(self, contract): c", "__eq__(self, other): return self.name == other.name and (set(self.values) & set(other.values)) def __ne__(self, other):", "return 
\"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self, other): return self.name ==", "c.name == key.name: return c return None def is_lower(self, contract): c = self[contract]", "def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self, other): return", "(self == other) class PackageContracts: def __init__(self, contracts): self._contracts = contracts def __getitem__(self,", "self.values)) def __str__(self): return \"{}{}\".format(self.name, self.values) def __repr__(self): return str(self) def __eq__(self, other):", "(set(self.values) & set(other.values)) def __ne__(self, other): return not (self == other) class PackageContracts:", "& set(other.values)) def __ne__(self, other): return not (self == other) class PackageContracts: def", "class Contract: def __init__(self, name, values): self.name = name self.values = values def", "return str(self) def __eq__(self, other): return self.name == other.name and (set(self.values) & set(other.values))" ]
[ "check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon =", "j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0] return i1,", "ny(self): return self.nr @property def X(self): return self._X @property def Y(self): return self._Y", "import six import numpy as np from pyproj import Proj import operator from", "2 and ndim_y == 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x,", "= xx, yy self._CX, self._CY = cxx, cyy if self._raw_x is not None", "i = np.round(i) j = np.round(j) if np.isscalar(i): i = int(i) j =", "def i2x(self, i, j): px = i * self.dx + self.x_orig py =", "if self._raw_y is not None else self._Y @property def cx(self): return self._raw_cx if", "@property def nx(self): return self.ntheta @property def ny(self): return self.nr @property def X(self):", "if ndim_x == 1 and ndim_y == 1: self._nx, self._ny = len(x), len(y)", "cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy =", "def dx(self): return self._dx @dx.setter def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY()", "self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta", "self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr)", ">= start) & (j <= self.ny - subtracted), j, np.nan) return i, j", "def y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox", "dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x is not None", "ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y == 1:", "ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x == 2", "y1 is None: y1 = bbox[1] if x2 is None: x2 = bbox[2]", "if self._raw_cx is not None else self._CX @property def 
cy(self): return self._raw_cy if", "def dump(self): return { \"X\": self.X, \"Y\": self.Y, \"nx\": self.nx, \"ny\": self.ny, }", "alone when no raw x presents.\") ndim_y = np.ndim(y) if ndim_y == 1", "rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin =", "np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_)", "@property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2,", "if x1 is None: x1 = bbox[0] if y1 is None: y1 =", "None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy = None", "self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter def dx(self,", "= np.arctan2(a, b) c = np.sin(dlat / 2) ** 2 + cos_lat1 *", "= np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj)", "self.x_orig) / self.dx j = (py - self.y_orig) / self.dy if int_index: if", "THETA, R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON", "= self.rmin + j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon,", "= lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1)", "= 0 if int_index in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if", "== 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def", "= int(j) else: i = i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i,", "r_ = r / self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ =", "self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy = None self._bbox =", "r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth sin_r = np.sin(r_) cos_r =", "def nx(self): return self.ntheta @property def ny(self): return self.nr @property def X(self): return", "* 2 
/ self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr - 1)", "return i, j def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x),", "self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0 if x1 is not None", "def X(self, x): if self._raw_y is None: raise ValueError(\"Cannot set x alone when", "= lon2 - lon1 dlat = lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon", "y) @property def CX(self): return self._CX @property def CY(self): return self._CY @property def", "def __call__(self, x, y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder is a", "- self.theta0) / self.dtheta % self.ntheta j = (r - self.rmin) / self.dr", "is None: raise ValueError(\"Cannot set y alone when no raw x presents.\") ndim_y", "bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx,", "- sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat", "x2 = bbox[2] if y2 is None: y2 = bbox[3] bad = ~((self.X", "x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 =", "not None and y1 is not None: px1, py1 = self.proj(x1, y1) self._dx", "= np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon /", "dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter", "- px0 self._dy = py1 - py0 self._updateXY() def dump(self): return { \"proj\":", "(i <= self.nx - subtracted), i, np.nan) j = np.where((j >= start) &", "= np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif", "1: self._raw_cx = self._CX[0] else: self._raw_cx = None if self._raw_y is not None", "= (r - self.rmin) / self.dr if int_index: i = np.round(i) j =", "self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth sin_r = np.sin(r_)", "sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0", "self._raw_x, self._raw_y 
= None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj,", "j1, i2, j2 def check_bound(self, i, j, int_index=True): start = -0.5 subtracted =", "return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None,", "@property def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def", "self._CX, self._CY = cxx, cyy if self._raw_x is not None and self._raw_x.ndim ==", "self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter def Y(self, y): if self._raw_x", "def i2x(self, i, j): theta = self.theta0 + i * self.dtheta r =", "actual conversion. \"\"\" @property def srs(self): return '' def __call__(self, x, y, **kwargs):", "lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x, y, int_index=True,", "-0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X, self._Y", "r, theta): r_ = r / self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_)", "**kwargs): return self.X[j, i], self.Y[j, i] def x2i(self, x, y, *args, **kwargs): distances", "+ self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X", "@property def dx(self): return self._dx @dx.setter def dx(self, value): self._dx = value self._reset_raw_xy()", "self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY),", "return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx, ny,", "cxx, cyy if self._raw_x is not None and self._raw_x.ndim == 1: self._raw_cx =", "y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def proj(self): return self._proj", "but NullProj does not do actual conversion. 
\"\"\" @property def srs(self): return ''", "x(self): return self._raw_x if self._raw_x is not None else self._X @property def y(self):", "y_points.shape[0] == 0 else y_points[0] return i1, j1, i2, j2 def check_bound(self, i,", "rmax self.nr = nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth = r_earth", "np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan) return i,", "/ self.dtheta % self.ntheta j = (r - self.rmin) / self.dr if int_index:", "self.dx j = (py - self.y_orig) / self.dy if int_index: if int_index in", "np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape:", "= np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0 *", "start = 0 if int_index in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i):", "self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig", "self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y", "= (-1, -1) if y_points.shape[0] == 0 else y_points[0] return i1, j1, i2,", "np.ndim(y) if ndim_x == 0 and ndim_y == 0: x0, y0 = x,", "self.theta0 + i * self.dtheta r = self.rmin + j * self.dr lon,", "nx, ny, dx, dy, x_orig, y_orig): self._nx = nx self._ny = ny self._dx", "ndim_y == 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y =", "= int(i) j = int(j) else: i = i.astype('i') j = j.astype('i') if", "i], self.Y[j, i] def x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y)", "if ndim_x == 0 and ndim_y == 0: x0, y0 = x, y", "= np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx, flat_i % self.nx def", "def y(self): return self._Y def i2x(self, i, j): theta = self.theta0 + i", "np.floor(i) j = np.floor(j) else: i = np.round(i) j = np.round(j) if np.isscalar(i):", "ndim_x == 0 and ndim_y == 0: x0, 
y0 = x, y if", "j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase):", "def dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. \"\"\" @property", "r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT return", "self._raw_y if self._raw_y is not None else self._Y @property def cx(self): return self._raw_cx", "return self.check_bound(i, j, int_index=int_index) else: return i, j def calibrate(self, x, y, x1=None,", "* class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does not do actual", "j = j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j", "@property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig = value self._reset_raw_xy()", "else self._X @property def y(self): return self._raw_y if self._raw_y is not None else", "self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r)", "Y(self): return self._Y @Y.setter def Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot", "int_index in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if (i >= start", "y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0 and", "i2x(self, *args): \"\"\"Convert i, j, ... -> x, y, ...\"\"\" raise NotImplementedError def", "return xx, yy def i2x(self, i, j): px = i * self.dx +", "@y_orig.setter def y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property def bbox(self): return", "self._raw_cy = self._CY[:, 0] else: self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X),", "bbox[0] if y1 is None: y1 = bbox[1] if x2 is None: x2", "as proj_string self._proj = Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy() if all([hasattr(self,", "y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder is a helper for i,", "if int_index in ('lowerleft', 'll'): i = np.floor(i) j = np.floor(j) else: i", "self.theta0 = theta0 self.r_earth = r_earth self.dtheta = np.pi * 2 / self.ntheta", "self.lon0 = lon0 self.lat0 = lat0 self.rmin = rmin self.rmax = rmax self.nr", "/ self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ =", "**kwargs): bbox = self.bbox if x1 is None: x1 = bbox[0] if y1", "def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig", "elif ndim_x == 2 and ndim_y == 2: x0, x1 = x[0, 0],", "lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r", "LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT return self._X,", "np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0", "2: x0, x1 = x[0, 0], x[1, 1] y0, y1 = y[0, 0],", "self.dy if int_index: if int_index in ('lowerleft', 'll'): i = np.floor(i) j =", "Proj(**p) else: # Treat as proj_string self._proj = Proj(str(p)) # TODO: check PY3", "self._updateXY() @property def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox", "@dx.setter def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self): return", "self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]):", "def Y(self): return self._Y @Y.setter def Y(self, y): if self._raw_x is None: raise", "self.dr if int_index: i = np.round(i) j = np.round(j) if np.isscalar(i): i =", "= np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y == 0: x0, y0", "int_index=int_index) else: return i, j def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y", "is out of bound!\".format(i, j)) 
else: i = np.where((i >= start) & (i", "self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return", "self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px, py = self.proj(x,", "= np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1) if x_points.shape[0] ==", "nx self._ny = ny self._dx = dx self._dy = dy self._x_orig = x_orig", "lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2", "= px0 self._y_orig = py0 if x1 is not None and y1 is", "self.Y. \"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1,", "if self._raw_y is None: raise ValueError(\"Cannot set x alone when no raw y", "x_orig, y_orig) @property def proj(self): return self._proj @proj.setter def proj(self, p): if p", "a helper for i, j <-> x, y conversion, etc.\"\"\" def i2x(self, *args):", "self._reset_raw_xy() if x is not None and y is not None: self.set_xy(x, y)", "j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def", "= np.floor(i) j = np.floor(j) else: i = np.round(i) j = np.round(j) if", "kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return", "@property def X(self): return self._X @X.setter def X(self, x): if self._raw_y is None:", "& (i <= self.nx - subtracted), i, np.nan) j = np.where((j >= start)", "x0, y0, x1=None, y1=None): return def dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires", "def y(self): return self._raw_y if self._raw_y is not None else self._Y @property def", "start) & (j <= self.ny - subtracted), j, np.nan) return i, j class", "theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin = rmin self.rmax =", "XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs):", "= x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def", "= np.round(i) j = np.round(j) if np.isscalar(i): i = int(i) j = int(j)", "self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px, py", "self._Y def i2x(self, i, j): theta = self.theta0 + i * self.dtheta r", "* cos_lat2 * np.sin(dlon / 2) ** 2 d = 2 * np.arcsin(np.sqrt(c))", "theta): r_ = r / self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_", "value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter def ny(self, value): self._ny", "self._proj @proj.setter def proj(self, p): if p is None: self._proj = NullProj() elif", "r = self.rmin + j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return", "if int_index in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if (i >=", "def i2x(self, *args): \"\"\"Convert i, j, ... 
-> x, y, ...\"\"\" raise NotImplementedError", "<= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted): return i, j", "= x[0], x[1] y0, y1 = y[0], y[1] elif ndim_x == 2 and", "i] def x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i =", "nx(self): return self._nx @nx.setter def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property", "y2 is None: y2 = bbox[3] bad = ~((self.X >= x1) & (self.X", "ndim_x == 1 and ndim_y == 1: self._nx, self._ny = len(x), len(y) elif", "j = np.floor(j) else: i = np.round(i) j = np.round(j) if np.isscalar(i): i", "@property def Y(self): return self._Y @Y.setter def Y(self, y): if self._raw_x is None:", "cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r *", "= np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta)", "np.array(Y) if X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y", "None else self._X @property def y(self): return self._raw_y if self._raw_y is not None", "X(self): return self._X @X.setter def X(self, x): if self._raw_y is None: raise ValueError(\"Cannot", "y, ... -> i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws =", "* np.sin(dlon / 2) ** 2 d = 2 * np.arcsin(np.sqrt(c)) r =", "NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does not do actual conversion. 
\"\"\"", "else: # Treat as proj_string self._proj = Proj(str(p)) # TODO: check PY3 compatibility.", "self._raw_y is not None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else:", "= -0.5 subtracted = 1 if int_index: start = 0 if int_index in", "self._dx @dx.setter def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self):", "else: i = np.where((i >= start) & (i <= self.nx - subtracted), i,", "= py1 - py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx,", "np.floor(j) else: i = np.round(i) j = np.round(j) if np.isscalar(i): i = int(i)", "i, j, ... -> x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs):", "d = 2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth i = (theta", "dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. \"\"\" @property def", "self._raw_cx is not None else self._CX @property def cy(self): return self._raw_cy if self._raw_cy", "self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x,", "np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self): jj, ii =", "self.nx-subtracted) and (j >= start and j <= self.ny-subtracted): return i, j else:", "self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r", "self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return", "np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0,", "np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2)", "operator from .exceptions import * class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj", "*args, **kwargs): return self.X[j, i], self.Y[j, i] def x2i(self, x, y, *args, **kwargs):", "(self.Y >= y1) & (self.Y <= y2)) 
x_bad = np.alltrue(bad, axis=0) y_bad =", "NullProj does not do actual conversion. \"\"\" @property def srs(self): return '' def", "ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y == 0: x0,", "return self.ntheta @property def ny(self): return self.nr @property def X(self): return self._X @property", "dlat = lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 =", "sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property def nx(self):", "if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j def calibrate(self, x,", "if self._raw_x is not None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else:", "NullProj)): self._proj = p elif isinstance(p, dict): self._proj = Proj(**p) else: # Treat", "self._ny = ny self._dx = dx self._dy = dy self._x_orig = x_orig self._y_orig", "Y): X = np.array(X) Y = np.array(Y) if X.ndim == 1: self.X, self.Y", "self.nr = nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta", "x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx =", "ndim_y == 0: x0, y0 = x, y if ndim_x == 1 and", "self._X, self._Y = xx, yy self._CX, self._CY = cxx, cyy if self._raw_x is", "i, j else: raise OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i, j))", "= bbox[1] if x2 is None: x2 = bbox[2] if y2 is None:", "ndim_y == 2: x0, x1 = x[0, 0], x[1, 1] y0, y1 =", "x, y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder is a helper for", "y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise", "cos_lat2 * np.sin(dlon / 2) ** 2 d = 2 * np.arcsin(np.sqrt(c)) r", "+ self.x_orig py = j * self.dy + self.y_orig return self.proj(px, py, inverse=True)", "# Treat as proj_string self._proj = Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy()", "self.nx, flat_i % self.nx def dump(self): return { \"X\": self.X, \"Y\": self.Y, \"nx\":", "def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx,", "self._raw_x is not None else self._X @property def y(self): return self._raw_y if self._raw_y", "elif ndim_x == 2 and ndim_y == 2: self._ny, self._nx = np.shape(x) else:", "(Proj, NullProj)): self._proj = p elif isinstance(p, dict): self._proj = Proj(**p) else: #", "self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y):", "self._updateXY() @property def dy(self): return self._dy @dy.setter def dy(self, value): self._dy = value", "x[0, 0], x[1, 1] y0, y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x,", "= np.pi * 2 / self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr", "i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. def __init__(self, X, Y): X", "if self._raw_x is not None else self._X @property def y(self): return self._raw_y if", "* self.dy + self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True,", "None: px1, py1 = self.proj(x1, y1) self._dx = px1 - px0 self._dy =", "\"\"\"Gridder is a helper for i, j <-> x, y conversion, etc.\"\"\" def", "px0 self._y_orig = py0 if x1 is not None and y1 is not", "return self.check_bound(i, j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): # TODO: use", "== 1: x0, x1 = x[0], x[1] y0, y1 = y[0], y[1] elif", "@property def cy(self): return self._raw_cy if self._raw_cy is not None else self._CY @property", "int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def __init__(self,", "int_index: start = 0 if int_index in ('lowerleft', 'll'): subtracted = 2 if", "self._updateXY() @property def dx(self): return self._dx @dx.setter def dx(self, value): self._dx = value", "np.arcsin(np.sqrt(c)) r = d * self.r_earth i = (theta - self.theta0) / self.dtheta", "y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... ->", "start = -0.5 subtracted = 1 if int_index: start = 0 if int_index", "j = (r - self.rmin) / self.dr if int_index: i = np.round(i) j", "x): if self._raw_y is None: raise ValueError(\"Cannot set x alone when no raw", "self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0", "presents.\") ndim_y = np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x,", "self._raw_y = None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii", "= self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None,", "no raw y presents.\") ndim_x = np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y)", "self._X @X.setter def X(self, x): if self._raw_y is None: raise ValueError(\"Cannot set x", "x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... -> i, j, ...\"\"\" raise NotImplementedError", "= self.proj(x, y) i = (px - self.x_orig) / self.dx j = (py", "self.proj(x, y) i = (px - self.x_orig) / self.dx j = (py -", "Treat as proj_string self._proj = Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy() if", "is not None and y1 is not None: px1, py1 = self.proj(x1, y1)", "np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self,", "def CX(self): return self._CX @property def CY(self): return self._CY @property def x(self): return", "np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self,", "if (i >= start and i <= self.nx-subtracted) and (j >= start and", "(theta - self.theta0) / self.dtheta % self.ntheta j = (r - self.rmin) /", "lat0 self.rmin = rmin self.rmax = rmax self.nr = nr self.ntheta = ntheta", "not None and y is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx,", "y_points[0] return i1, j1, i2, j2 def check_bound(self, i, j, int_index=True): start =", "CY(self): return self._CY @property def x(self): return self._raw_x if self._raw_x is not None", "self._nx = nx self._ny = ny self._dx = dx self._dy = dy self._x_orig", "j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0] return i1, j1,", "def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY()", "@proj.setter def proj(self, p): if p is None: self._proj = NullProj() elif isinstance(p,", "return flat_i / self.nx, flat_i % self.nx def dump(self): return { \"X\": self.X,", "= np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx,", "@property def y(self): return self._raw_y if self._raw_y is not None else self._Y @property", "start and j <= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {}, j:", ">= start) & (i <= self.nx - subtracted), i, np.nan) j = np.where((j", "bound!\".format(i, j)) else: i = np.where((i >= start) & (i <= self.nx -", "@property def srs(self): return '' def __call__(self, x, y, **kwargs): return x, y", 
"cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 *", "{} is out of bound!\".format(i, j)) else: i = np.where((i >= start) &", "= self.X.shape[1] return flat_i / self.nx, flat_i % self.nx def dump(self): return {", "check_bound=None): px, py = self.proj(x, y) i = (px - self.x_orig) / self.dx", "if np.isscalar(i): i = int(i) j = int(j) else: i = i.astype('i') j", "lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat = lat2 -", "nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter", "value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig", "dict): self._proj = Proj(**p) else: # Treat as proj_string self._proj = Proj(str(p)) #", "1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y)", "bbox[2] if y2 is None: y2 = bbox[3] bad = ~((self.X >= x1)", "y is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig)", "y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y)))", "<= y2)) x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True,", "y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig", "in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X", "get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox = self.bbox if x1 is None:", "None and y1 is not None: px1, py1 = self.proj(x1, y1) self._dx =", "lat @property def nx(self): return self.ntheta @property def ny(self): return self.nr @property def", "theta = self.theta0 + i * self.dtheta r = self.rmin + j *", "ndim_y == 1: self._nx, self._ny = len(x), len(y) elif ndim_x == 2 and", "== 1: self.set_xy(x, 
self._raw_y) elif ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x,", "self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0,", "proj self._reset_raw_xy() if x is not None and y is not None: self.set_xy(x,", "self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def proj(self): return", "new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return def", "if x is not None and y is not None: self.set_xy(x, y) else:", "def x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances)", "theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r) LON,", "X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y = X,", "helper for i, j <-> x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert", "y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property", "LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth", "(self.X <= x2) & (self.Y >= y1) & (self.Y <= y2)) x_bad =", "y[1] elif ndim_x == 2 and ndim_y == 2: x0, x1 = x[0,", "= r_earth self.dtheta = np.pi * 2 / self.ntheta self.dr = (self.rmax -", "'' def __call__(self, x, y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder is", "is not None else self._CX @property def cy(self): return self._raw_cy if self._raw_cy is", "- self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax,", "None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx = None if", "0 if int_index in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if (i", "lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property def nx(self): return", "else: self._raw_cy = None self._bbox = 
(np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX),", "= len(x), len(y) elif ndim_x == 2 and ndim_y == 2: self._ny, self._nx", "== 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def", "not None else self._X @property def y(self): return self._raw_y if self._raw_y is not", "= j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j def", "and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy = None self._bbox", "def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x),", "= Proj(**p) else: # Treat as proj_string self._proj = Proj(str(p)) # TODO: check", "LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT return self._X, self._Y", "R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y", "self.ny - subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None,", "p): if p is None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj", "self.check_bound(i, j, int_index=int_index) else: return i, j def calibrate(self, x, y, x1=None, y1=None):", "check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j def calibrate(self, x, y,", "flat_i / self.nx, flat_i % self.nx def dump(self): return { \"X\": self.X, \"Y\":", "np.ndim(y) if ndim_x == 1 and ndim_y == 1: self._nx, self._ny = len(x),", "self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA)", "j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j def calibrate(self,", "dx, dy, x_orig, y_orig) @property def proj(self): return self._proj @proj.setter def proj(self, p):", "new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return def dump(self): 
return {} class", "TODO: check PY3 compatibility. self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx', '_ny',", "self._ny @ny.setter def ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self):", "... -> i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws = self.dump()", "self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx, ny, dx,", "NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... -> i, j, ...\"\"\"", "* np.arcsin(np.sqrt(c)) r = d * self.r_earth i = (theta - self.theta0) /", "self.r_earth = r_earth self.dtheta = np.pi * 2 / self.ntheta self.dr = (self.rmax", "x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y <= y2))", "2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1) if", "cjj) self._X, self._Y = xx, yy self._CX, self._CY = cxx, cyy if self._raw_x", "y) elif ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X,", "return self._X @property def y(self): return self._Y def i2x(self, i, j): theta =", "int(j) else: i = i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i, j,", "cyy = self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX, self._CY = cxx,", "2) ** 2 d = 2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth", "def nx(self): return self._nx @nx.setter def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY()", "ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None,", "numpy as np from pyproj import Proj import operator from .exceptions import *", "= proj self._reset_raw_xy() if x is not None and y is not None:", "\"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax,", "x1=None, y1=None): return def 
dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X &", "2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self):", "- subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None,", "cos_lat2 * cos_dlon theta = np.arctan2(a, b) c = np.sin(dlat / 2) **", "in ('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if (i >= start and", "proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj", "return self._Y @Y.setter def Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot set", "return self._dx @dx.setter def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property def", "def proj(self): return self._proj @proj.setter def proj(self, p): if p is None: self._proj", "y0, y1 = y[0], y[1] elif ndim_x == 2 and ndim_y == 2:", "else self._CX @property def cy(self): return self._raw_cy if self._raw_cy is not None else", "axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points =", "self.ntheta = ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta = np.pi *", "(py - self.y_orig) / self.dy if int_index: if int_index in ('lowerleft', 'll'): i", "px1, py1 = self.proj(x1, y1) self._dx = px1 - px0 self._dy = py1", "OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i, j)) else: i = np.where((i", "& (self.Y <= y2)) x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points", "six import numpy as np from pyproj import Proj import operator from .exceptions", "py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0 if x1 is", "* self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x,", "self._updateXY() @property def X(self): return self._X @X.setter def X(self, x): if self._raw_y is", "self._raw_cy if self._raw_cy is 
not None else self._CY @property def nx(self): return self._nx", "def check_bound(self, i, j, int_index=True): start = -0.5 subtracted = 1 if int_index:", "calibrate(self, x0, y0, x1=None, y1=None): return def dump(self): return {} class XYGridderBase(GridderBase): \"\"\"", "self._proj = Proj(str(p)) # TODO: check PY3 compatibility. self._reset_raw_xy() if all([hasattr(self, attr) for", "dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj,", "in ('lowerleft', 'll'): i = np.floor(i) j = np.floor(j) else: i = np.round(i)", "ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property", "y2)) x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad,", "* sin_dlon b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon", "def X(self): return self._X @X.setter def X(self, x): if self._raw_y is None: raise", "** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2 d", "self._X = LON self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta):", "self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2", "'_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter def X(self, x): if self._raw_y", "dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter", "= lat0 self.rmin = rmin self.rmax = rmax self.nr = nr self.ntheta =", "r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA,", "= ny self._dx = dx self._dy = dy self._x_orig = x_orig self._y_orig =", "cos_r + cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_", "np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def 
i2x(self, i,", "self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter def ny(self,", ">= start and j <= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {},", "XYIrregularGridder(XYGridderBase): # TODO: use kdtree. def __init__(self, X, Y): X = np.array(X) Y", "y if ndim_x == 1 and ndim_y == 1: x0, x1 = x[0],", "None if self._raw_y is not None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:,", "self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter def dx(self, value): self._dx =", "lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 *", "x2 is None: x2 = bbox[2] if y2 is None: y2 = bbox[3]", "if self._raw_y is not None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0]", "r_earth self.dtheta = np.pi * 2 / self.ntheta self.dr = (self.rmax - self.rmin)", "= dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return", "j = int(j) else: i = i.astype('i') j = j.astype('i') if check_bound: return", "= i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return", "y1 = y[0], y[1] elif ndim_x == 2 and ndim_y == 2: x0,", "not do actual conversion. \"\"\" @property def srs(self): return '' def __call__(self, x,", "-1) if y_points.shape[0] == 0 else y_points[0] return i1, j1, i2, j2 def", "raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... 
-> i, j,", "np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1,", "= (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx,", "y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1]", "return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def", "self.dtheta r = self.rmin + j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta)", "else y_points[0] return i1, j1, i2, j2 def check_bound(self, i, j, int_index=True): start", "= np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj)", "int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon", "nx = self.X.shape[1] return flat_i / self.nx, flat_i % self.nx def dump(self): return", "+ np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat) lon =", "Y(self): return self._Y @property def x(self): return self._X @property def y(self): return self._Y", "return self._raw_cx if self._raw_cx is not None else self._CX @property def cy(self): return", "else self._CY @property def nx(self): return self._nx @nx.setter def nx(self, value): self._nx =", "-*- import six import numpy as np from pyproj import Proj import operator", "None: x2 = bbox[2] if y2 is None: y2 = bbox[3] bad =", "\"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx", "i2x(self, i, j): theta = self.theta0 + i * self.dtheta r = self.rmin", "self._x_orig = px0 self._y_orig = py0 if x1 is not None and y1", "self.X[j, i], self.Y[j, i] def x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x,", "self.bbox if x1 is 
None: x1 = bbox[0] if y1 is None: y1", "sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a, b) c =", "np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1) if x_points.shape[0] == 0", "i * self.dx + self.x_orig py = j * self.dy + self.y_orig return", "ndim_y = np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y)", "'_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter def X(self, x): if", "None: raise ValueError(\"Cannot set x alone when no raw y presents.\") ndim_x =", "lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1)", "jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy =", "self.Y = X, Y self.ny, self.nx = X.shape def i2x(self, i, j, *args,", "* sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a, b) c", "ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter", "= np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT", "start) & (i <= self.nx - subtracted), i, np.nan) j = np.where((j >=", "proj_string self._proj = Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy() if all([hasattr(self, attr)", "self.set_xy(x, self._raw_y) elif ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else:", "= X.shape def i2x(self, i, j, *args, **kwargs): return self.X[j, i], self.Y[j, i]", "self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x, y,", "X, Y self.ny, self.nx = X.shape def i2x(self, i, j, *args, **kwargs): return", "(self.nr - 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta =", "= ~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1) &", "np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y) == np.shape(self.X):", "x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0", "attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return", "y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y ==", "= value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value):", "self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self, i, j):", "self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return def dump(self): return", "x is not None and y is not None: self.set_xy(x, y) else: self._init_with_para(nx,", "ndim_x == 1 and ndim_y == 1: x0, x1 = x[0], x[1] y0,", "np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y == 0: x0, y0 =", "Requires self.X & self.Y. 
\"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y))", "* cos_lat2 * cos_dlon theta = np.arctan2(a, b) c = np.sin(dlat / 2)", "= LON self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_", "x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y", "r = d * self.r_earth i = (theta - self.theta0) / self.dtheta %", "else: self._raw_cx = None if self._raw_y is not None and self._raw_y.ndim == 1:", "1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y)", "return def dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. \"\"\"", "if y2 is None: y2 = bbox[3] bad = ~((self.X >= x1) &", "(self.rmax - self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin,", "x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self,", "= np.meshgrid(X, Y) else: self.X, self.Y = X, Y self.ny, self.nx = X.shape", "-> x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y,", "i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0] j1, j2 =", "**kwargs): \"\"\"Convert x, y, ... 
-> i, j, ...\"\"\" raise NotImplementedError def copy(self,", "i, j): theta = self.theta0 + i * self.dtheta r = self.rmin +", "self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property def CY(self):", "1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) *", "= d * self.r_earth i = (theta - self.theta0) / self.dtheta % self.ntheta", "x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1,", "y_orig) @property def proj(self): return self._proj @proj.setter def proj(self, p): if p is", "* self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R,", "is not None else self._X @property def y(self): return self._raw_y if self._raw_y is", "self._nx @nx.setter def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self):", "def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj", "kdtree. 
def __init__(self, X, Y): X = np.array(X) Y = np.array(Y) if X.ndim", "= dx self._dy = dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property", "& (self.Y >= y1) & (self.Y <= y2)) x_bad = np.alltrue(bad, axis=0) y_bad", "0 else y_points[0] return i1, j1, i2, j2 def check_bound(self, i, j, int_index=True):", "self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx, flat_i %", "subtracted), i, np.nan) j = np.where((j >= start) & (j <= self.ny -", "np.max(self._CY)) return xx, yy def i2x(self, i, j): px = i * self.dx", "j)) else: i = np.where((i >= start) & (i <= self.nx - subtracted),", "yy def i2x(self, i, j): px = i * self.dx + self.x_orig py", "cxx, cyy = self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX, self._CY =", "def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x", "('lowerleft', 'll'): i = np.floor(i) j = np.floor(j) else: i = np.round(i) j", "self.dy + self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None):", "self._raw_x is not None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx", "self.x_orig py = j * self.dy + self.y_orig return self.proj(px, py, inverse=True) def", "self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y)", "def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self,", "i * self.dtheta r = self.rmin + j * self.dr lon, lat =", "@property def x(self): return self._raw_x if self._raw_x is not None else self._X @property", "= px1 - px0 self._dy = py1 - py0 self._updateXY() def dump(self): return", "return self._raw_x if self._raw_x is not None else self._X @property def y(self): return", "dx self._dy = dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property def", "y1) & (self.Y <= y2)) x_bad = 
np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1)", "np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_)", "class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does not do actual conversion.", "px = i * self.dx + self.x_orig py = j * self.dy +", "np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0", "is None: raise ValueError(\"Cannot set x alone when no raw y presents.\") ndim_x", "copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self,", "= None if self._raw_y is not None and self._raw_y.ndim == 1: self._raw_cy =", "lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 =", "x(self): return self._X @property def y(self): return self._Y def i2x(self, i, j): theta", "self._Y @Y.setter def Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot set y", "return self._CY @property def x(self): return self._raw_x if self._raw_x is not None else", "if int_index: i = np.round(i) j = np.round(j) if np.isscalar(i): i = int(i)", "if y1 is None: y1 = bbox[1] if x2 is None: x2 =", "= x, y if ndim_x == 1 and ndim_y == 1: x0, x1", "x1 is None: x1 = bbox[0] if y1 is None: y1 = bbox[1]", "else: self.X, self.Y = X, Y self.ny, self.nx = X.shape def i2x(self, i,", "py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\":", "if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): #", "(-1, -1) if y_points.shape[0] == 0 else y_points[0] return i1, j1, i2, j2", "& self.Y. 
\"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self,", "ndim_x == 2 and ndim_y == 2: x0, x1 = x[0, 0], x[1,", "ndim_x == 2 and ndim_y == 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x,", "= self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return def dump(self):", "def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny,", "y2 = bbox[3] bad = ~((self.X >= x1) & (self.X <= x2) &", "else: i = i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index)", "...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... -> i,", "y(self): return self._Y def i2x(self, i, j): theta = self.theta0 + i *", "& (self.X <= x2) & (self.Y >= y1) & (self.Y <= y2)) x_bad", "= (py - self.y_orig) / self.dy if int_index: if int_index in ('lowerleft', 'll'):", "if int_index: if int_index in ('lowerleft', 'll'): i = np.floor(i) j = np.floor(j)", "= np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r -", "np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx, flat_i % self.nx def dump(self):", "- self.x_orig) / self.dx j = (py - self.y_orig) / self.dy if int_index:", "== 0 and ndim_y == 0: x0, y0 = x, y if ndim_x", "= X, Y self.ny, self.nx = X.shape def i2x(self, i, j, *args, **kwargs):", "1: self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y = X, Y self.ny,", "isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x", "raise OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i, j)) else: i =", "and ndim_y == 0: x0, y0 = x, y if ndim_x == 1", "self.ny, self.nx = X.shape def i2x(self, i, j, *args, **kwargs): return self.X[j, i],", "0: x0, y0 = x, y if ndim_x == 1 and ndim_y ==", "cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2 - 
sin_lat1 * cos_lat2 *", "_reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx]", "...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws)", "self._CY = cxx, cyy if self._raw_x is not None and self._raw_x.ndim == 1:", "'_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter def", "np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat", "= nx self._ny = ny self._dx = dx self._dy = dy self._x_orig =", "self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter def dy(self, value): self._dy =", "y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0)", "THETA) self._X = LON self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self, r,", "self._CX @property def cy(self): return self._raw_cy if self._raw_cy is not None else self._CY", "len(x), len(y) elif ndim_x == 2 and ndim_y == 2: self._ny, self._nx =", "1: x0, x1 = x[0], x[1] y0, y1 = y[0], y[1] elif ndim_x", "NullProj) def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x ==", "def calibrate(self, x0, y0, x1=None, y1=None): return def dump(self): return {} class XYGridderBase(GridderBase):", "lon, lat @property def nx(self): return self.ntheta @property def ny(self): return self.nr @property", "return self._dy @dy.setter def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property def", "srs(self): return '' def __call__(self, x, y, **kwargs): return x, y class GridderBase(object):", "y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig = px0", "2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2", "def srs(self): return '' def __call__(self, x, y, **kwargs): return 
x, y class", "dy, x_orig, y_orig) @property def proj(self): return self._proj @proj.setter def proj(self, p): if", "i, j, *args, **kwargs): return self.X[j, i], self.Y[j, i] def x2i(self, x, y,", "return self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx = nx", "x[1] y0, y1 = y[0], y[1] elif ndim_x == 2 and ndim_y ==", "i, j def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y)", "bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs):", "return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth sin_r", "np.rad2deg(lat_) return lon, lat @property def nx(self): return self.ntheta @property def ny(self): return", "is not None: px1, py1 = self.proj(x1, y1) self._dx = px1 - px0", "Y self.ny, self.nx = X.shape def i2x(self, i, j, *args, **kwargs): return self.X[j,", "self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin,", "ValueError(\"Cannot set y alone when no raw x presents.\") ndim_y = np.ndim(y) if", "x, y, ... -> i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws", "np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x", "not None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx = None", "to pyproj.Proj, but NullProj does not do actual conversion. 
\"\"\" @property def srs(self):", "return self._X @X.setter def X(self, x): if self._raw_y is None: raise ValueError(\"Cannot set", "j def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if", "i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None,", "else: i = np.round(i) j = np.round(j) if np.isscalar(i): i = int(i) j", "else x_points[0] j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0]", "== np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter", "bbox = self.bbox if x1 is None: x1 = bbox[0] if y1 is", "2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y)", "} class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371):", "= nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta =", "PY3 compatibility. self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy',", "self._CY @property def nx(self): return self._nx @nx.setter def nx(self, value): self._nx = value", "value): self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter def", "do actual conversion. \"\"\" @property def srs(self): return '' def __call__(self, x, y,", "self.X & self.Y. \"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def", "else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig =", "else: return i, j def calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y =", "conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ... 
-> x, y, ...\"\"\"", "def ny(self): return self._ny @ny.setter def ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY()", "GridderBase(object): \"\"\"Gridder is a helper for i, j <-> x, y conversion, etc.\"\"\"", "cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2 d = 2 *", "x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ...", "self.nx - subtracted), i, np.nan) j = np.where((j >= start) & (j <=", "= bbox[2] if y2 is None: y2 = bbox[3] bad = ~((self.X >=", "return new_gridder def calibrate(self, x0, y0, x1=None, y1=None): return def dump(self): return {}", "raw x presents.\") ndim_y = np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) ==", "-*- coding:utf-8 -*- import six import numpy as np from pyproj import Proj", "self.proj(x1, y1) self._dx = px1 - px0 self._dy = py1 - py0 self._updateXY()", "distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i /", "self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx = nx self._ny", "self._ny = len(x), len(y) elif ndim_x == 2 and ndim_y == 2: self._ny,", "np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self, i, j): px = i", "'ll'): subtracted = 2 if np.isscalar(i): if (i >= start and i <=", "= np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y == 1: self._nx, self._ny", "None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx]", "p elif isinstance(p, dict): self._proj = Proj(**p) else: # Treat as proj_string self._proj", "@property def dy(self): return self._dy @dy.setter def dy(self, value): self._dy = value self._reset_raw_xy()", "y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ... 
-> x, y,", "= self._CY[:, 0] else: self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y))", "def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox = self.bbox if x1 is", "def Y(self): return self._Y @property def x(self): return self._X @property def y(self): return", "- 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta)", "np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y", "x0, x1 = x[0, 0], x[1, 1] y0, y1 = y[0, 0], y[1,", "yy self._CX, self._CY = cxx, cyy if self._raw_x is not None and self._raw_x.ndim", "x1 = x[0, 0], x[1, 1] y0, y1 = y[0, 0], y[1, 1]", "y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if", "sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a", "_updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0", "def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta +", "@Y.setter def Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot set y alone", "= value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter def dy(self, value):", "bbox[3] bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y >=", "= np.array(Y) if X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y) else: self.X,", "None and y is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy,", "== 1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X,", "# TODO: check PY3 compatibility. 
self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx',", "raise ValueError(\"Cannot set x alone when no raw y presents.\") ndim_x = np.ndim(x)", "class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0,", "x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx", "self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth sin_r =", "== 1: self._nx, self._ny = len(x), len(y) elif ndim_x == 2 and ndim_y", "compatibility. self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig',", "int_index=True, check_bound=None): px, py = self.proj(x, y) i = (px - self.x_orig) /", "= y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0)", "sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat =", "and y1 is not None: px1, py1 = self.proj(x1, y1) self._dx = px1", "return self._raw_cy if self._raw_cy is not None else self._CY @property def nx(self): return", "= self.proj(x1, y1) self._dx = px1 - px0 self._dy = py1 - py0", "int(i) j = int(j) else: i = i.astype('i') j = j.astype('i') if check_bound:", "and ndim_y == 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y", "else: raise OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i, j)) else: i", "self._updateXY() @property def ny(self): return self._ny @ny.setter def ny(self, value): self._ny = value", "return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\":", "X.shape def i2x(self, i, j, *args, **kwargs): return self.X[j, i], self.Y[j, i] def", "d * self.r_earth i = (theta - self.theta0) / self.dtheta % self.ntheta j", "self.check_bound(i, j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree.", "= None self._bbox = 
(np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX),", "bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1)", "if ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y ==", "self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self,", "and ndim_y == 2: x0, x1 = x[0, 0], x[1, 1] y0, y1", "\"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta,", "= cxx, cyy if self._raw_x is not None and self._raw_x.ndim == 1: self._raw_cx", "proj(self, p): if p is None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)):", "= np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan) j", "y2, **kwargs): bbox = self.bbox if x1 is None: x1 = bbox[0] if", "dlon = lon2 - lon1 dlat = lat2 - lat1 sin_dlon = np.sin(dlon)", "lon2 - lon1 dlat = lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon =", "/ self.nx, flat_i % self.nx def dump(self): return { \"X\": self.X, \"Y\": self.Y,", "value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner box\"\"\"", "self._Y = xx, yy self._CX, self._CY = cxx, cyy if self._raw_x is not", "**kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i", "i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i,", "x2i(self, x, y, int_index=True, check_bound=None): px, py = self.proj(x, y) i = (px", "i2x(self, i, j): px = i * self.dx + self.x_orig py = j", "def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig", "Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy() if all([hasattr(self, attr) for attr in", "self._nx, self._ny = len(x), len(y) elif ndim_x == 2 and ndim_y == 2:", "self._proj = p elif isinstance(p, dict): self._proj = Proj(**p) else: # Treat as", "lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat) lon", "j: {} is out of bound!\".format(i, j)) else: i = np.where((i >= start)", "value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter def dx(self, value): self._dx", "== 0: x0, y0 = x, y if ndim_x == 1 and ndim_y", "not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def", "== 2: x0, x1 = x[0, 0], x[1, 1] y0, y1 = y[0,", "calibrate(self, x, y, x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x ==", "*args): \"\"\"Convert i, j, ... -> x, y, ...\"\"\" raise NotImplementedError def x2i(self,", "1 and ndim_y == 1: self._nx, self._ny = len(x), len(y) elif ndim_x ==", "np.cos(lat2) a = cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2 - sin_lat1", "ny self._dx = dx self._dy = dy self._x_orig = x_orig self._y_orig = y_orig", "{}, j: {} is out of bound!\".format(i, j)) else: i = np.where((i >=", "/ self.dr if int_index: i = np.round(i) j = np.round(j) if np.isscalar(i): i", "self._Y @property def x(self): return self._X @property def y(self): return self._Y def i2x(self,", "self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig = value", "py1 = self.proj(x1, y1) self._dx = px1 - px0 self._dy = py1 -", "= np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R", "y_orig): self._nx = nx self._ny = ny self._dx = dx self._dy = dy", "@property def X(self): return self._X @property def Y(self): return self._Y @property def x(self):", "sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon b =", "x, y, int_index=True, check_bound=None): px, py = 
self.proj(x, y) i = (px -", "np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx, flat_i", "def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1", "int_index: if int_index in ('lowerleft', 'll'): i = np.floor(i) j = np.floor(j) else:", "y0, y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 =", "j): px = i * self.dx + self.x_orig py = j * self.dy", "ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y == 0:", "(j >= start and j <= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i:", "2 and ndim_y == 2: x0, x1 = x[0, 0], x[1, 1] y0,", "self._CX @property def CY(self): return self._CY @property def x(self): return self._raw_x if self._raw_x", "self.r_earth i = (theta - self.theta0) / self.dtheta % self.ntheta j = (r", "@nx.setter def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self): return", "return i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def __init__(self, X, Y):", "j, np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None,", "= (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self, i, j): px", "% self.ntheta j = (r - self.rmin) / self.dr if int_index: i =", "px0, py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0 if x1", "is None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p elif", "/ 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) **", "import operator from .exceptions import * class NullProj(object): \"\"\" Similar to pyproj.Proj, but", "return self._raw_y if self._raw_y is not None else self._Y @property def cx(self): return", "self.ntheta @property def ny(self): return self.nr @property def X(self): return self._X @property def", "* cos_r + cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ =", "from .exceptions import * class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does", "- subtracted), i, np.nan) j = np.where((j >= start) & (j <= self.ny", "Y) else: self.X, self.Y = X, Y self.ny, self.nx = X.shape def i2x(self,", "= rmin self.rmax = rmax self.nr = nr self.ntheta = ntheta self.theta0 =", "if X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y =", "0] else: self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox =", "inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px, py = self.proj(x, y) i", "Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot set y alone when no", "y, int_index=True, check_bound=None): px, py = self.proj(x, y) i = (px - self.x_orig)", "ny, dx, dy, x_orig, y_orig) @property def proj(self): return self._proj @proj.setter def proj(self,", "self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p, dict):", "TODO: use kdtree. 
def __init__(self, X, Y): X = np.array(X) Y = np.array(Y)", "XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. \"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y),", "self.dtheta = np.pi * 2 / self.ntheta self.dr = (self.rmax - self.rmin) /", "1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig", "@X.setter def X(self, x): if self._raw_y is None: raise ValueError(\"Cannot set x alone", "use kdtree. def __init__(self, X, Y): X = np.array(X) Y = np.array(Y) if", "= bbox[0] if y1 is None: y1 = bbox[1] if x2 is None:", "- self.rmin) / self.dr if int_index: i = np.round(i) j = np.round(j) if", "self._raw_x if self._raw_x is not None else self._X @property def y(self): return self._raw_y", "cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii,", "x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1,", "i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted): return i,", "x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter", "(np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy", "ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y == 2", "for i, j <-> x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i,", "x0, x1 = x[0], x[1] y0, y1 = y[0], y[1] elif ndim_x ==", "j, *args, **kwargs): return self.X[j, i], self.Y[j, i] def x2i(self, x, y, *args,", "def ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx", "if self._raw_x is None: raise ValueError(\"Cannot set y alone when no raw x", "sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a, b) c 
= np.sin(dlat /", "return i, j else: raise OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i,", "def CY(self): return self._CY @property def x(self): return self._raw_x if self._raw_x is not", "\"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\":", "np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX", "return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property def", "raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y =", "= lon0 self.lat0 = lat0 self.rmin = rmin self.rmax = rmax self.nr =", "- lon1 dlat = lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon)", "x[1, 1] y0, y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0,", "2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth i = (theta - self.theta0)", "np.pi * 2 / self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr -", "i = np.floor(i) j = np.floor(j) else: i = np.round(i) j = np.round(j)", "self._dy @dy.setter def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self):", "X(self): return self._X @property def Y(self): return self._Y @property def x(self): return self._X", "lon, lat def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y)", ">= start and i <= self.nx-subtracted) and (j >= start and j <=", "xx, yy self._CX, self._CY = cxx, cyy if self._raw_x is not None and", "** 2 d = 2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth i", "('lowerleft', 'll'): subtracted = 2 if np.isscalar(i): if (i >= start and i", "conversion. 
\"\"\" @property def srs(self): return '' def __call__(self, x, y, **kwargs): return", "self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x,", "x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x is not None and", "self._dx = px1 - px0 self._dy = py1 - py0 self._updateXY() def dump(self):", "CX(self): return self._CX @property def CY(self): return self._CY @property def x(self): return self._raw_x", "def Y(self, y): if self._raw_x is None: raise ValueError(\"Cannot set y alone when", "j = np.round(j) if np.isscalar(i): i = int(i) j = int(j) else: i", "import Proj import operator from .exceptions import * class NullProj(object): \"\"\" Similar to", "check PY3 compatibility. self._reset_raw_xy() if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx',", "return self._Y def i2x(self, i, j): theta = self.theta0 + i * self.dtheta", "value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def", "self.dr = (self.rmax - self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self): r", "is not None else self._CY @property def nx(self): return self._nx @nx.setter def nx(self,", "y alone when no raw x presents.\") ndim_y = np.ndim(y) if ndim_y ==", "else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def proj(self): return self._proj @proj.setter", "np.nan) j = np.where((j >= start) & (j <= self.ny - subtracted), j,", "ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta = np.pi * 2 /", "no raw x presents.\") ndim_y = np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x)", "= lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat)", "theta) return lon, lat def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 =", "= np.floor(j) else: i = np.round(i) j = np.round(j) if np.isscalar(i): i =", "py0 if x1 is not None and y1 
is not None: px1, py1", "2) i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0] j1,", "x, y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x,", "= (self.rmax - self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self): r =", "ny, dx, dy, x_orig, y_orig): self._nx = nx self._ny = ny self._dx =", "j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder =", "/ 2) ** 2 d = 2 * np.arcsin(np.sqrt(c)) r = d *", "self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX, self._CY = cxx, cyy if", "= np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2", "np.array(X) Y = np.array(Y) if X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y)", "self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else:", "cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta)", "np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 =", "return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox", "def cy(self): return self._raw_cy if self._raw_cy is not None else self._CY @property def", "np.linspace(self.rmin, self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R =", "return '' def __call__(self, x, y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder", "cos_lat0, cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return", "self._CX[0] else: self._raw_cx = None if self._raw_y is not None and self._raw_y.ndim ==", "1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x == 2 and np.shape(x)", "cbox(self): \"\"\"corner box\"\"\" return 
self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig):", "presents.\") ndim_x = np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x,", "else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x,", "= value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter def ny(self, value):", "2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2 d =", "and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return", "self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self):", "None: y2 = bbox[3] bad = ~((self.X >= x1) & (self.X <= x2)", "= np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat =", "self._CY[:, 0] else: self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox", "dy(self): return self._dy @dy.setter def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property", "= sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat)", "self._y_orig = py0 if x1 is not None and y1 is not None:", "== 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy = None self._bbox = (np.min(self._X),", "import numpy as np from pyproj import Proj import operator from .exceptions import", "= y[0], y[1] elif ndim_x == 2 and ndim_y == 2: x0, x1", "import * class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does not do", "<-> x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ... 
->", "_raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self):", "= np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon", "int_index=True): start = -0.5 subtracted = 1 if int_index: start = 0 if", "= cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2", "lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1", "<= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {}, j: {} is out", "np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon b", "\"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig }", "None: y1 = bbox[1] if x2 is None: x2 = bbox[2] if y2", "np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X,", "value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter def dy(self, value): self._dy", "y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property", "+ cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ +", "= j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j class", "y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y ==", "x1 = bbox[0] if y1 is None: y1 = bbox[1] if x2 is", "self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig =", "ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii,", "x1 = x[0], x[1] y0, y1 = y[0], y[1] elif ndim_x == 2", "{} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. 
\"\"\" @property def bbox(self): return", "value): self._y_orig = value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property def", "* cos_lat0, cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_)", "np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r +", "if int_index: start = 0 if int_index in ('lowerleft', 'll'): subtracted = 2", "X, Y): X = np.array(X) Y = np.array(Y) if X.ndim == 1: self.X,", "self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def proj(self): return self._proj @proj.setter def", "np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2)", "y(self): return self._raw_y if self._raw_y is not None else self._Y @property def cx(self):", "x0, y0 = x, y if ndim_x == 1 and ndim_y == 1:", "out of bound!\".format(i, j)) else: i = np.where((i >= start) & (i <=", "y0, x1=None, y1=None): return def dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X", "= np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y", "check_bound: return self.check_bound(i, j, int_index=int_index) else: return i, j class XYIrregularGridder(XYGridderBase): # TODO:", "np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x == 2 and np.shape(x) == np.shape(self.Y):", "= (theta - self.theta0) / self.dtheta % self.ntheta j = (r - self.rmin)", "box\"\"\" return self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx =", "== np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property", "== 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x == 2 and", "+ i * self.dtheta r = self.rmin + j * self.dr lon, lat", "lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x, y, int_index=True, 
check_bound=None):", "nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x", "px, py = self.proj(x, y) i = (px - self.x_orig) / self.dx j", "px0 self._dy = py1 - py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs,", "and i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted): return", "self.lat0 = lat0 self.rmin = rmin self.rmax = rmax self.nr = nr self.ntheta", "+ j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def", "j * self.dy + self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x, y,", "x2, y2, **kwargs): bbox = self.bbox if x1 is None: x1 = bbox[0]", "*args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i = np.argmin(distances) nx = self.X.shape[1] return", "self._X @property def Y(self): return self._Y @property def x(self): return self._X @property def", "= ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta = np.pi * 2", "self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj)", "sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r", "- self.y_orig) / self.dy if int_index: if int_index in ('lowerleft', 'll'): i =", "self._raw_y is not None else self._Y @property def cx(self): return self._raw_cx if self._raw_cx", "@dy.setter def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return", "if x1 is not None and y1 is not None: px1, py1 =", "\"\"\" Requires self.X & self.Y. 
\"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X),", "y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0,", "1: self._raw_cy = self._CY[:, 0] else: self._raw_cy = None self._bbox = (np.min(self._X), np.min(self._Y),", "/ self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr - 1) self._updateXY() def", "self.rmin) / self.dr if int_index: i = np.round(i) j = np.round(j) if np.isscalar(i):", "None self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY))", "self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr - 1) self._updateXY() def _updateXY(self):", "None else self._CX @property def cy(self): return self._raw_cy if self._raw_cy is not None", "self.rmin = rmin self.rmax = rmax self.nr = nr self.ntheta = ntheta self.theta0", "lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat", "= np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad,", "np.isscalar(i): if (i >= start and i <= self.nx-subtracted) and (j >= start", "elif ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y)", "= value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value):", "- py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny,", "j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self,", "@property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy()", "2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y) @property def CX(self):", 
"* self.dx + self.x_orig py = j * self.dy + self.y_orig return self.proj(px,", "1: self.set_xy(x, self._raw_y) elif ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y)", "rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin", "y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property def CY(self): return", "= np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property def nx(self): return self.ntheta", "dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x is not", "= value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter def dx(self, value):", "i, j): px = i * self.dx + self.x_orig py = j *", "return self.nr @property def X(self): return self._X @property def Y(self): return self._Y @property", "= x[0, 0], x[1, 1] y0, y1 = y[0, 0], y[1, 1] else:", "self._raw_cx = self._CX[0] else: self._raw_cx = None if self._raw_y is not None and", "def x(self): return self._X @property def y(self): return self._Y def i2x(self, i, j):", "self.nx def dump(self): return { \"X\": self.X, \"Y\": self.Y, \"nx\": self.nx, \"ny\": self.ny,", "= np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y =", "__init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj =", "x[0], x[1] y0, y1 = y[0], y[1] elif ndim_x == 2 and ndim_y", "and y is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig,", "i1, j1, i2, j2 def check_bound(self, i, j, int_index=True): start = -0.5 subtracted", "2 d = 2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth i =", "= p elif isinstance(p, dict): self._proj = Proj(**p) else: # Treat as proj_string", "__call__(self, x, y, **kwargs): return x, y class GridderBase(object): \"\"\"Gridder is a helper", "self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig 
= value self._reset_raw_xy() self._updateXY() @property def bbox(self):", "+ self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px,", "np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y == 1: self._nx, self._ny =", "when no raw y presents.\") ndim_x = np.ndim(x) if ndim_x == 1 and", "+ cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2 d = 2", "def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig = value self._reset_raw_xy() self._updateXY()", "np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox = self.bbox if", "(np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self, i, j): px =", "self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X =", "== 1: self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y = X, Y", "not None else self._CY @property def nx(self): return self._nx @nx.setter def nx(self, value):", "raw y presents.\") ndim_x = np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) ==", "self._CY @property def x(self): return self._raw_x if self._raw_x is not None else self._X", "x1 is not None and y1 is not None: px1, py1 = self.proj(x1,", "r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin = rmin self.rmax = rmax", "x, y if ndim_x == 1 and ndim_y == 1: x0, x1 =", "y class GridderBase(object): \"\"\"Gridder is a helper for i, j <-> x, y", "np.meshgrid(theta, r) LON, LAT = self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT", "np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta) lat_", "x_points[0] j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0] return", "self._y_orig = value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property def cbox(self):", "self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): 
raise ValueError(\"Invalid", "py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px, py = self.proj(x, y)", "(j <= self.ny - subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase): def", "= np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 =", "\"\"\" Similar to pyproj.Proj, but NullProj does not do actual conversion. \"\"\" @property", "== 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and", "ndim_x = np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y)", "int_index in ('lowerleft', 'll'): i = np.floor(i) j = np.floor(j) else: i =", "= np.array(X) Y = np.array(Y) if X.ndim == 1: self.X, self.Y = np.meshgrid(X,", "self._Y @property def cx(self): return self._raw_cx if self._raw_cx is not None else self._CX", "y1) self._dx = px1 - px0 self._dy = py1 - py0 self._updateXY() def", "value): self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self): return self._dx @dx.setter def", "and j <= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {}, j: {}", "/ self.dy if int_index: if int_index in ('lowerleft', 'll'): i = np.floor(i) j", "1] y0, y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0", "if x_points.shape[0] == 0 else x_points[0] j1, j2 = (-1, -1) if y_points.shape[0]", "j): theta = self.theta0 + i * self.dtheta r = self.rmin + j", "self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx,", "pyproj import Proj import operator from .exceptions import * class NullProj(object): \"\"\" Similar", "i, j, int_index=True): start = -0.5 subtracted = 1 if int_index: start =", "return lon, lat def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x),", "self._raw_cx if self._raw_cx is not None else self._CX @property def cy(self): return self._raw_cy", "np.shape(self.X): self.set_xy(self.X, 
y) else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property def", "def cx(self): return self._raw_cx if self._raw_cx is not None else self._CX @property def", "np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat =", "y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1) if x_points.shape[0] == 0 else", "self.rmin + j * self.dr lon, lat = self.r_theta_to_lon_lat(r, theta) return lon, lat", "y[0], y[1] elif ndim_x == 2 and ndim_y == 2: x0, x1 =", "y) px0, py0 = self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0 if", "self._raw_y) elif ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x,", "def x2i(self, x, y, int_index=True, check_bound=None): px, py = self.proj(x, y) i =", "@property def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx, ny, dx, dy,", "xx, yy def i2x(self, i, j): px = i * self.dx + self.x_orig", "x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0),", "def dy(self): return self._dy @dy.setter def dy(self, value): self._dy = value self._reset_raw_xy() self._updateXY()", "len(y) elif ndim_x == 2 and ndim_y == 2: self._ny, self._nx = np.shape(x)", "alone when no raw y presents.\") ndim_x = np.ndim(x) if ndim_x == 1", ".exceptions import * class NullProj(object): \"\"\" Similar to pyproj.Proj, but NullProj does not", "lon1 dlat = lat2 - lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1", "= Proj(str(p)) # TODO: check PY3 compatibility. 
self._reset_raw_xy() if all([hasattr(self, attr) for attr", "def ny(self): return self.nr @property def X(self): return self._X @property def Y(self): return", "ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin = rmin self.rmax", "is None: x2 = bbox[2] if y2 is None: y2 = bbox[3] bad", "self.Y = np.meshgrid(X, Y) else: self.X, self.Y = X, Y self.ny, self.nx =", "if p is None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj =", "return self._ny @ny.setter def ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY() @property def", "int_index: i = np.round(i) j = np.round(j) if np.isscalar(i): i = int(i) j", "= 2 * np.arcsin(np.sqrt(c)) r = d * self.r_earth i = (theta -", "self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr,", "ndim_y == 1: x0, x1 = x[0], x[1] y0, y1 = y[0], y[1]", "self._dx = dx self._dy = dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY()", "cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon,", "y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x is not None and y", "= j * self.dy + self.y_orig return self.proj(px, py, inverse=True) def x2i(self, x,", "def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy", "is None: y1 = bbox[1] if x2 is None: x2 = bbox[2] if", "return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if", "== 1 and ndim_y == 1: x0, x1 = x[0], x[1] y0, y1", "j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def __init__(self, X, Y): X =", "cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2", "= (-1, -1) if x_points.shape[0] == 0 else x_points[0] j1, j2 = (-1,", "x_orig, y_orig): self._nx = nx self._ny = ny self._dx = dx self._dy =", "self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0)", "j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0,", "bbox[1] if x2 is None: x2 = bbox[2] if y2 is None: y2", "\"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0,", "raise NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return", "= np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon b = cos_lat1", "np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2", "self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx = None if self._raw_y is", "and ndim_y == 1: x0, x1 = x[0], x[1] y0, y1 = y[0],", "class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. 
\"\"\" @property def bbox(self): return (np.min(self.X),", "np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points", "return i1, j1, i2, j2 def check_bound(self, i, j, int_index=True): start = -0.5", "sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2", "Y = np.array(Y) if X.ndim == 1: self.X, self.Y = np.meshgrid(X, Y) else:", "= (px - self.x_orig) / self.dx j = (py - self.y_orig) / self.dy", "1 and ndim_y == 1: x0, x1 = x[0], x[1] y0, y1 =", "x_points.shape[0] == 0 else x_points[0] j1, j2 = (-1, -1) if y_points.shape[0] ==", "= y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y):", "value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig", "np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property def nx(self): return self.ntheta @property", "b) c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 *", "- lat1 sin_dlon = np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 =", "lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat = lat2", "self.proj = proj self._reset_raw_xy() if x is not None and y is not", "* sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property def", "self._raw_cy is not None else self._CY @property def nx(self): return self._nx @nx.setter def", "np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R = np.meshgrid(theta, r) LON, LAT =", "coding:utf-8 -*- import six import numpy as np from pyproj import Proj import", "== 2 and ndim_y == 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y)", "NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p, dict): self._proj =", "== 0 else y_points[0] return i1, j1, i2, j2 def 
check_bound(self, i, j,", "pyproj.Proj, but NullProj does not do actual conversion. \"\"\" @property def srs(self): return", "cy(self): return self._raw_cy if self._raw_cy is not None else self._CY @property def nx(self):", "= rmax self.nr = nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth =", "j else: raise OutOfGridBound(\"i: {}, j: {} is out of bound!\".format(i, j)) else:", "when no raw x presents.\") ndim_y = np.ndim(y) if ndim_y == 1 and", "self.y_orig) / self.dy if int_index: if int_index in ('lowerleft', 'll'): i = np.floor(i)", "self.dx + self.x_orig py = j * self.dy + self.y_orig return self.proj(px, py,", "dx(self): return self._dx @dx.setter def dx(self, value): self._dx = value self._reset_raw_xy() self._updateXY() @property", "np.round(j) if np.isscalar(i): i = int(i) j = int(j) else: i = i.astype('i')", "elif ndim_y == 2 and np.shape(y) == np.shape(self.X): self.set_xy(self.X, y) else: self._raise_invalid_shape(self.X, y)", "self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property def CY(self): return self._CY @property", "self._dy = py1 - py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\":", "**kwargs): return x, y class GridderBase(object): \"\"\"Gridder is a helper for i, j", "= cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a,", "np.isscalar(i): i = int(i) j = int(j) else: i = i.astype('i') j =", "if np.isscalar(i): if (i >= start and i <= self.nx-subtracted) and (j >=", "(self.Y <= y2)) x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points =", "and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x == 2 and np.shape(x) ==", "i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0] j1, j2", "sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta) lat_ =", "y1 = bbox[1] if x2 is None: x2 = bbox[2] if y2 is", "i = i.astype('i') j = j.astype('i') if check_bound: return self.check_bound(i, j, int_index=int_index) 
else:", "isinstance(p, dict): self._proj = Proj(**p) else: # Treat as proj_string self._proj = Proj(str(p))", "j <= self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {}, j: {} is", "& (j <= self.ny - subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase):", "self.rmax = rmax self.nr = nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth", "x2) & (self.Y >= y1) & (self.Y <= y2)) x_bad = np.alltrue(bad, axis=0)", "x, y class GridderBase(object): \"\"\"Gridder is a helper for i, j <-> x,", "c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon", "i = int(i) j = int(j) else: i = i.astype('i') j = j.astype('i')", "isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p, dict): self._proj = Proj(**p) else:", "attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def", "j, int_index=True): start = -0.5 subtracted = 1 if int_index: start = 0", "of bound!\".format(i, j)) else: i = np.where((i >= start) & (i <= self.nx", "= np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat = lat2 - lat1", "np.sin(dlon / 2) ** 2 d = 2 * np.arcsin(np.sqrt(c)) r = d", "{ \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig,", "y1=None): return def dump(self): return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y.", "if y_points.shape[0] == 0 else y_points[0] return i1, j1, i2, j2 def check_bound(self,", "(px - self.x_orig) / self.dx j = (py - self.y_orig) / self.dy if", "y0 = x, y if ndim_x == 1 and ndim_y == 1: x0,", "('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter", "= NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p, dict): self._proj", "y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def", "@property def CY(self): return 
self._CY @property def x(self): return self._raw_x if self._raw_x is", "0], y[1, 1] else: self._raise_invalid_shape(x, y) px0, py0 = self.proj(x0, y0) self._x_orig =", "self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter def Y(self,", "None else self._CY @property def nx(self): return self._nx @nx.setter def nx(self, value): self._nx", "flat_i = np.argmin(distances) nx = self.X.shape[1] return flat_i / self.nx, flat_i % self.nx", "lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 *", "b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta =", "subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None,", "* sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) *", "if self._raw_cy is not None else self._CY @property def nx(self): return self._nx @nx.setter", "self.X, self.Y = np.meshgrid(X, Y) else: self.X, self.Y = X, Y self.ny, self.nx", "and ndim_y == 1: self._nx, self._ny = len(x), len(y) elif ndim_x == 2", "not None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy =", "np from pyproj import Proj import operator from .exceptions import * class NullProj(object):", "set x alone when no raw y presents.\") ndim_x = np.ndim(x) if ndim_x", "np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter def", "y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x,", "np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan) j =", "<= x2) & (self.Y >= y1) & (self.Y <= y2)) x_bad = np.alltrue(bad,", "np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y", "_init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx = nx self._ny = ny", "is 
None: x1 = bbox[0] if y1 is None: y1 = bbox[1] if", "np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self, proj=None, x=None, y=None, nx=None, ny=None,", "self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter def Y(self, y):", "ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1 and ndim_y == 1: self._nx,", "i = (theta - self.theta0) / self.dtheta % self.ntheta j = (r -", "% self.nx def dump(self): return { \"X\": self.X, \"Y\": self.Y, \"nx\": self.nx, \"ny\":", "lon0 self.lat0 = lat0 self.rmin = rmin self.rmax = rmax self.nr = nr", "Similar to pyproj.Proj, but NullProj does not do actual conversion. \"\"\" @property def", "x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1,", "else self._Y @property def cx(self): return self._raw_cx if self._raw_cx is not None else", "@property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y =", "and (j >= start and j <= self.ny-subtracted): return i, j else: raise", "def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny", "value): self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter def", ">= x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y <=", "if x2 is None: x2 = bbox[2] if y2 is None: y2 =", "y): if self._raw_x is None: raise ValueError(\"Cannot set y alone when no raw", "np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_)", "dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy,", "x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ... 
-> x,", "cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a, b)", "<= self.nx - subtracted), i, np.nan) j = np.where((j >= start) & (j", "def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0", "= self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X, self._Y = xx, yy", "y1, x2, y2, **kwargs): bbox = self.bbox if x1 is None: x1 =", "@property def ny(self): return self._ny @ny.setter def ny(self, value): self._ny = value self._reset_raw_xy()", "self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0, y0, x1=None, y1=None):", "(-1, -1) if x_points.shape[0] == 0 else x_points[0] j1, j2 = (-1, -1)", "0], x[1, 1] y0, y1 = y[0, 0], y[1, 1] else: self._raise_invalid_shape(x, y)", "= r / self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0)", "a = cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2 - sin_lat1 *", "= py0 if x1 is not None and y1 is not None: px1,", "def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def", "is None: y2 = bbox[3] bad = ~((self.X >= x1) & (self.X <=", "i, np.nan) j = np.where((j >= start) & (j <= self.ny - subtracted),", "np.asarray(y) self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape: {},", "= np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1", "def proj(self, p): if p is None: self._proj = NullProj() elif isinstance(p, (Proj,", "self._X @property def y(self): return self._raw_y if self._raw_y is not None else self._Y", "not None else self._CX @property def cy(self): return self._raw_cy if self._raw_cy is not", "start and i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted):", "None: raise ValueError(\"Cannot set y alone when no raw x presents.\") ndim_y =", "yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X, 
self._Y = xx,", "1 if int_index: start = 0 if int_index in ('lowerleft', 'll'): subtracted =", "'_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter def X(self,", "= 1 if int_index: start = 0 if int_index in ('lowerleft', 'll'): subtracted", "0 and ndim_y == 0: x0, y0 = x, y if ndim_x ==", "check_bound(self, i, j, int_index=True): start = -0.5 subtracted = 1 if int_index: start", "_updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy", "(i >= start and i <= self.nx-subtracted) and (j >= start and j", "i2x(self, i, j, *args, **kwargs): return self.X[j, i], self.Y[j, i] def x2i(self, x,", "elif isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p, dict): self._proj = Proj(**p)", "r / self.r_earth sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_", "not None: px1, py1 = self.proj(x1, y1) self._dx = px1 - px0 self._dy", "py = j * self.dy + self.y_orig return self.proj(px, py, inverse=True) def x2i(self,", "x1=None, y1=None): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 0 and ndim_y", "= np.rad2deg(lat_) return lon, lat @property def nx(self): return self.ntheta @property def ny(self):", "**kwargs): self.proj = proj self._reset_raw_xy() if x is not None and y is", "is a helper for i, j <-> x, y conversion, etc.\"\"\" def i2x(self,", "= np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif", "self._dx = value self._reset_raw_xy() self._updateXY() @property def dy(self): return self._dy @dy.setter def dy(self,", "\"\"\"Convert i, j, ... 
-> x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args,", "y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1) if x_points.shape[0]", "class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0", "@property def x(self): return self._X @property def y(self): return self._Y def i2x(self, i,", "ValueError(\"Cannot set x alone when no raw y presents.\") ndim_x = np.ndim(x) if", "value): self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def", "flat_i % self.nx def dump(self): return { \"X\": self.X, \"Y\": self.Y, \"nx\": self.nx,", "return x, y class GridderBase(object): \"\"\"Gridder is a helper for i, j <->", "self.nr @property def X(self): return self._X @property def Y(self): return self._Y @property def", "@property def ny(self): return self.nr @property def X(self): return self._X @property def Y(self):", "is not None and self._raw_y.ndim == 1: self._raw_cy = self._CY[:, 0] else: self._raw_cy", "nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0 self.rmin = rmin", "i = (px - self.x_orig) / self.dx j = (py - self.y_orig) /", "sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat = np.rad2deg(lat_) return lon, lat @property", "== 1: self._raw_cx = self._CX[0] else: self._raw_cx = None if self._raw_y is not", "shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self):", "{}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self): jj,", "set y alone when no raw x presents.\") ndim_y = np.ndim(y) if ndim_y", "@property def CX(self): return self._CX @property def CY(self): return self._CY @property def x(self):", "def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), 
np.shape(y))) def", "lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 -", "= np.cos(lat2) a = cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2 -", "= np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 =", "* np.cos(theta) lat_ = np.arcsin(sin_lat) lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r *", "0 else x_points[0] j1, j2 = (-1, -1) if y_points.shape[0] == 0 else", "def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2,", "X = np.array(X) Y = np.array(Y) if X.ndim == 1: self.X, self.Y =", "1: self._nx, self._ny = len(x), len(y) elif ndim_x == 2 and ndim_y ==", "np.sin(lat2) cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon b = cos_lat1 *", "px1 - px0 self._dy = py1 - py0 self._updateXY() def dump(self): return {", "if ndim_x == 1 and ndim_y == 1: x0, x1 = x[0], x[1]", "self._raw_y is None: raise ValueError(\"Cannot set x alone when no raw y presents.\")", "'_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self): return self._X @X.setter def X(self, x):", "= self._CX[0] else: self._raw_cx = None if self._raw_y is not None and self._raw_y.ndim", "def cbox(self): \"\"\"corner box\"\"\" return self._cbox def _init_with_para(self, nx, ny, dx, dy, x_orig,", "y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y", "= np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 =", "np.meshgrid(X, Y) else: self.X, self.Y = X, Y self.ny, self.nx = X.shape def", "is not None and y is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny,", "= theta0 self.r_earth = r_earth self.dtheta = np.pi * 2 / self.ntheta self.dr", "np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx,", "@property def proj(self): return 
self._proj @proj.setter def proj(self, p): if p is None:", "= value self._reset_raw_xy() self._updateXY() @property def bbox(self): return self._bbox @property def cbox(self): \"\"\"corner", "self.Y[j, i] def x2i(self, x, y, *args, **kwargs): distances = np.hypot(self.X-x, self.Y-y) flat_i", "not None else self._Y @property def cx(self): return self._raw_cx if self._raw_cx is not", "is not None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property", "def x2i(self, *args, **kwargs): \"\"\"Convert x, y, ... -> i, j, ...\"\"\" raise", "self._raw_x is None: raise ValueError(\"Cannot set y alone when no raw x presents.\")", "= bbox[3] bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y", "np.round(i) j = np.round(j) if np.isscalar(i): i = int(i) j = int(j) else:", "has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y)", "np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox = self.bbox", "raise ValueError(\"Cannot set y alone when no raw x presents.\") ndim_y = np.ndim(y)", "lat def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1,", "return self.X[j, i], self.Y[j, i] def x2i(self, x, y, *args, **kwargs): distances =", "@x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return", "j, int_index=int_index) else: return i, j def calibrate(self, x, y, x1=None, y1=None): ndim_x,", "return self._CX @property def CY(self): return self._CY @property def x(self): return self._raw_x if", "== 2: self._ny, self._nx = np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x),", "y) i = (px - self.x_orig) / self.dx j = (py - self.y_orig)", "2 if np.isscalar(i): if (i >= start and i <= self.nx-subtracted) and (j", "j2 def check_bound(self, i, j, int_index=True): start = -0.5 
subtracted = 1 if", "= self.theta0 + i * self.dtheta r = self.rmin + j * self.dr", "(r - self.rmin) / self.dr if int_index: i = np.round(i) j = np.round(j)", "def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig): self._nx = nx self._ny =", "self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class", "= self.bbox if x1 is None: x1 = bbox[0] if y1 is None:", "self.Y) @property def Y(self): return self._Y @Y.setter def Y(self, y): if self._raw_x is", "@property def Y(self): return self._Y @property def x(self): return self._X @property def y(self):", "and np.ndim(self._raw_x) == 1: self.set_xy(self._raw_x, y) elif ndim_y == 2 and np.shape(y) ==", "i2, j2 def check_bound(self, i, j, int_index=True): start = -0.5 subtracted = 1", "subtracted = 2 if np.isscalar(i): if (i >= start and i <= self.nx-subtracted)", "= np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 =", "ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy() if x is", "**kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0,", "nx(self): return self.ntheta @property def ny(self): return self.nr @property def X(self): return self._X", "cos_dlon theta = np.arctan2(a, b) c = np.sin(dlat / 2) ** 2 +", "and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return", "@ny.setter def ny(self, value): self._ny = value self._reset_raw_xy() self._updateXY() @property def dx(self): return", "\"\"\" @property def srs(self): return '' def __call__(self, x, y, **kwargs): return x,", "None: self.set_xy(x, y) else: self._init_with_para(nx, ny, dx, dy, x_orig, y_orig) @property def proj(self):", "theta0 self.r_earth = r_earth self.dtheta = np.pi * 2 / self.ntheta self.dr =", "ny(self): return self._ny @ny.setter def ny(self, 
value): self._ny = value self._reset_raw_xy() self._updateXY() @property", "self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value", "self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def", "set_xy(self, x, y): ndim_x, ndim_y = np.ndim(x), np.ndim(y) if ndim_x == 1 and", "None else self._Y @property def cx(self): return self._raw_cx if self._raw_cx is not None", "y presents.\") ndim_x = np.ndim(x) if ndim_x == 1 and np.ndim(self._raw_y) == 1:", "* self.dtheta r = self.rmin + j * self.dr lon, lat = self.r_theta_to_lon_lat(r,", "self._y_orig = y_orig self._updateXY() @property def has_null_proj(self): return isinstance(self.proj, NullProj) def set_xy(self, x,", "x alone when no raw y presents.\") ndim_x = np.ndim(x) if ndim_x ==", "= np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan) return", "is not None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx =", "class GridderBase(object): \"\"\"Gridder is a helper for i, j <-> x, y conversion,", "xx, yy = self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X, self._Y =", "= LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ = r /", "self.nx = X.shape def i2x(self, i, j, *args, **kwargs): return self.X[j, i], self.Y[j,", "cyy if self._raw_x is not None and self._raw_x.ndim == 1: self._raw_cx = self._CX[0]", "np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat = lat2 - lat1 sin_dlon =", "py1 - py0 self._updateXY() def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\":", "does not do actual conversion. 
\"\"\" @property def srs(self): return '' def __call__(self,", "if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY()", "= np.round(j) if np.isscalar(i): i = int(i) j = int(j) else: i =", "LON self._Y = LAT return self._X, self._Y def r_theta_to_lon_lat(self, r, theta): r_ =", "{}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None def _updateXY(self): jj, ii", "return self._X @property def Y(self): return self._Y @property def x(self): return self._X @property", "self.X.shape[1] return flat_i / self.nx, flat_i % self.nx def dump(self): return { \"X\":", "lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0 = lat0", "def x(self): return self._raw_x if self._raw_x is not None else self._X @property def", "-0.5 subtracted = 1 if int_index: start = 0 if int_index in ('lowerleft',", "all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property", "self.theta0) / self.dtheta % self.ntheta j = (r - self.rmin) / self.dr if", "return self.proj(px, py, inverse=True) def x2i(self, x, y, int_index=True, check_bound=None): px, py =", "def r_theta_to_lon_lat(self, r, theta): r_ = r / self.r_earth sin_r = np.sin(r_) cos_r", "*args, **kwargs): \"\"\"Convert x, y, ... -> i, j, ...\"\"\" raise NotImplementedError def", "self.X, self.Y = X, Y self.ny, self.nx = X.shape def i2x(self, i, j,", "else: return i, j class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def __init__(self, X,", "== 1 and ndim_y == 1: self._nx, self._ny = len(x), len(y) elif ndim_x", "sin_r = np.sin(r_) cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0", "= self.proj(x0, y0) self._x_orig = px0 self._y_orig = py0 if x1 is not", "def x2i(self, x, y, int_index=True, check_bound=None): lon2, lat2 = np.deg2rad(x), np.deg2rad(y) lon1, lat1", "@property def y(self): return self._Y def i2x(self, i, j): theta = self.theta0 +", "np.sin(dlon) cos_dlon = np.cos(dlon) sin_lat1 = np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2)", "is not None else self._Y @property def cx(self): return self._raw_cx if self._raw_cx is", "x presents.\") ndim_y = np.ndim(y) if ndim_y == 1 and np.ndim(self._raw_x) == 1:", "sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta) lat_ = np.arcsin(sin_lat) lon_", "x, y shape: {}, {}\".format(np.shape(x), np.shape(y))) def _reset_raw_xy(self): self._raw_x, self._raw_y = None, None", "y0) self._x_orig = px0 self._y_orig = py0 if x1 is not None and", "@property def nx(self): return self._nx @nx.setter def nx(self, value): self._nx = value self._reset_raw_xy()", "= np.sin(lat1) cos_lat1 = np.cos(lat1) sin_lat2 = np.sin(lat2) cos_lat2 = np.cos(lat2) a =", "NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder", "\"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self,", "self._X @property def y(self): return self._Y def i2x(self, i, j): theta = self.theta0", "self._x_orig = value self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self,", "def dump(self): return { \"proj\": self.proj.srs, \"nx\": self.nx, \"ny\": self.ny, \"dx\": self.dx, \"dy\":", "from pyproj import Proj import operator from .exceptions import * class NullProj(object): \"\"\"", "@property def 
cx(self): return self._raw_cx if self._raw_cx is not None else self._CX @property", "dx, dy, x_orig, y_orig): self._nx = nx self._ny = ny self._dx = dx", "def __init__(self, X, Y): X = np.array(X) Y = np.array(Y) if X.ndim ==", "= self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX, self._CY = cxx, cyy", "np.arctan2(a, b) c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2", "np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r", "as np from pyproj import Proj import operator from .exceptions import * class", "self.calibrate(x, y) def _raise_invalid_shape(self, x, y): raise ValueError(\"Invalid x, y shape: {}, {}\".format(np.shape(x),", "py = self.proj(x, y) i = (px - self.x_orig) / self.dx j =", "np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox = self.bbox if x1", "y1 is not None: px1, py1 = self.proj(x1, y1) self._dx = px1 -", "proj(self): return self._proj @proj.setter def proj(self, p): if p is None: self._proj =", "x_orig(self): return self._x_orig @x_orig.setter def x_orig(self, value): self._x_orig = value self._reset_raw_xy() self._updateXY() @property", "= i * self.dx + self.x_orig py = j * self.dy + self.y_orig", "* sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat) lon = np.rad2deg(lon_) lat", ">= y1) & (self.Y <= y2)) x_bad = np.alltrue(bad, axis=0) y_bad = np.alltrue(bad,", "/ (self.nr - 1) self._updateXY() def _updateXY(self): r = np.linspace(self.rmin, self.rmax, self.nr) theta", "True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2) i1, i2 = (-1, -1)", "return {} class XYGridderBase(GridderBase): \"\"\" Requires self.X & self.Y. 
\"\"\" @property def bbox(self):", "-> i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs)", "elif isinstance(p, dict): self._proj = Proj(**p) else: # Treat as proj_string self._proj =", "__init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 = lon0 self.lat0", "Proj import operator from .exceptions import * class NullProj(object): \"\"\" Similar to pyproj.Proj,", "j = (py - self.y_orig) / self.dy if int_index: if int_index in ('lowerleft',", "= self.r_theta_to_lon_lat(R, THETA) self._X = LON self._Y = LAT return self._X, self._Y def", "for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]): self._updateXY() @property def X(self):", "np.max(self._CX), np.max(self._CY)) return xx, yy def i2x(self, i, j): px = i *", "lat = np.rad2deg(lat_) return lon, lat @property def nx(self): return self.ntheta @property def", "None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny,", "self.rmax, self.nr) theta = np.arange(self.ntheta) * self.dtheta + self.theta0 THETA, R = np.meshgrid(theta,", "else: self._raise_invalid_shape(x, self.Y) @property def Y(self): return self._Y @Y.setter def Y(self, y): if", "'ll'): i = np.floor(i) j = np.floor(j) else: i = np.round(i) j =", "# TODO: use kdtree. def __init__(self, X, Y): X = np.array(X) Y =", "= np.shape(x) else: self._raise_invalid_shape(x, y) self._raw_x, self._raw_y = np.asarray(x), np.asarray(y) self.calibrate(x, y) def", "class XYIrregularGridder(XYGridderBase): # TODO: use kdtree. 
def __init__(self, X, Y): X = np.array(X)", "= 2 if np.isscalar(i): if (i >= start and i <= self.nx-subtracted) and", "jj) cxx, cyy = self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX, self._CY", "<= self.ny - subtracted), j, np.nan) return i, j class XYProjGridder(XYGridderBase): def __init__(self,", "* self.r_earth i = (theta - self.theta0) / self.dtheta % self.ntheta j =", "ndim_x == 2 and np.shape(x) == np.shape(self.Y): self.set_xy(x, self.Y) else: self._raise_invalid_shape(x, self.Y) @property", "* cos_dlon theta = np.arctan2(a, b) c = np.sin(dlat / 2) ** 2", "self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0,", "LonLatSurroundingGridder(XYGridderBase): def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371): self.lon0 =", "== 0 else x_points[0] j1, j2 = (-1, -1) if y_points.shape[0] == 0", "None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p elif isinstance(p,", "self.ntheta j = (r - self.rmin) / self.dr if int_index: i = np.round(i)", "theta = np.arctan2(a, b) c = np.sin(dlat / 2) ** 2 + cos_lat1", "-1) if x_points.shape[0] == 0 else x_points[0] j1, j2 = (-1, -1) if", "self._proj = Proj(**p) else: # Treat as proj_string self._proj = Proj(str(p)) # TODO:", "return self._proj @proj.setter def proj(self, p): if p is None: self._proj = NullProj()", "j, ... 
-> x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert", "None: x1 = bbox[0] if y1 is None: y1 = bbox[1] if x2", "kws = self.dump() kws.update(kwargs) new_gridder = self.__class__(**kws) return new_gridder def calibrate(self, x0, y0,", "= np.alltrue(bad, axis=0) y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2)", "cos_lat2 = np.cos(lat2) a = cos_lat2 * sin_dlon b = cos_lat1 * sin_lat2", "self._raw_cx = None if self._raw_y is not None and self._raw_y.ndim == 1: self._raw_cy", "~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y", "self._reset_raw_xy() self._updateXY() @property def ny(self): return self._ny @ny.setter def ny(self, value): self._ny =", "j = np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan)", "2 / self.ntheta self.dr = (self.rmax - self.rmin) / (self.nr - 1) self._updateXY()", "i, j <-> x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j,", "= np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0 = np.cos(lat0_) sin_lat = sin_lat0 * cos_r", "= self.r_theta_to_lon_lat(r, theta) return lon, lat def x2i(self, x, y, int_index=True, check_bound=None): lon2,", "def i2x(self, i, j, *args, **kwargs): return self.X[j, i], self.Y[j, i] def x2i(self,", "= None, None def _updateXY(self): jj, ii = np.mgrid[0:self.ny, 0:self.nx] cjj, cii =", "etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ... 
-> x, y, ...\"\"\" raise", "np.min(self._Y), np.max(self._X), np.max(self._Y)) self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY)) return xx, yy def", "dy, x_orig, y_orig): self._nx = nx self._ny = ny self._dx = dx self._dy", "self._reset_raw_xy() self._updateXY() @property def y_orig(self): return self._y_orig @y_orig.setter def y_orig(self, value): self._y_orig =", "\"ny\": self.ny, \"dx\": self.dx, \"dy\": self.dy, \"x_orig\": self.x_orig, \"y_orig\": self.y_orig } class LonLatSurroundingGridder(XYGridderBase):", "x1, y1, x2, y2, **kwargs): bbox = self.bbox if x1 is None: x1", "i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs): kws = self.dump() kws.update(kwargs) new_gridder", "\"\"\" @property def bbox(self): return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1,", "0:self.nx] cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx] xx, yy = self.i2x(ii, jj) cxx, cyy", "sin_dlon b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon theta", "- sin_lat1 * cos_lat2 * cos_dlon theta = np.arctan2(a, b) c = np.sin(dlat", "self.ny-subtracted): return i, j else: raise OutOfGridBound(\"i: {}, j: {} is out of", "if ndim_x == 1 and np.ndim(self._raw_y) == 1: self.set_xy(x, self._raw_y) elif ndim_x ==", "# -*- coding:utf-8 -*- import six import numpy as np from pyproj import", "self.i2x(ii, jj) cxx, cyy = self.i2x(cii, cjj) self._X, self._Y = xx, yy self._CX,", "\"\"\"Convert x, y, ... 
-> i, j, ...\"\"\" raise NotImplementedError def copy(self, **kwargs):", "return self._Y @property def x(self): return self._X @property def y(self): return self._Y def", "(np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y)) def get_bounding_ij(self, x1, y1, x2, y2, **kwargs): bbox =", "self._dy = value self._reset_raw_xy() self._updateXY() @property def x_orig(self): return self._x_orig @x_orig.setter def x_orig(self,", "X(self, x): if self._raw_y is None: raise ValueError(\"Cannot set x alone when no", "== 2 and ndim_y == 2: x0, x1 = x[0, 0], x[1, 1]", "... -> x, y, ...\"\"\" raise NotImplementedError def x2i(self, *args, **kwargs): \"\"\"Convert x,", "def X(self): return self._X @property def Y(self): return self._Y @property def x(self): return", "cos_r = np.cos(r_) lat0_ = np.deg2rad(self.lat0) lon0_ = np.deg2rad(self.lon0) sin_lat0 = np.sin(lat0_) cos_lat0", "subtracted = 1 if int_index: start = 0 if int_index in ('lowerleft', 'll'):", "and self._raw_x.ndim == 1: self._raw_cx = self._CX[0] else: self._raw_cx = None if self._raw_y", "p is None: self._proj = NullProj() elif isinstance(p, (Proj, NullProj)): self._proj = p", "return self._nx @nx.setter def nx(self, value): self._nx = value self._reset_raw_xy() self._updateXY() @property def", "nr self.ntheta = ntheta self.theta0 = theta0 self.r_earth = r_earth self.dtheta = np.pi", "cx(self): return self._raw_cx if self._raw_cx is not None else self._CX @property def cy(self):", "y_bad = np.alltrue(bad, axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = np.argwhere(np.diff(np.r_[True,", "self._dy = dy self._x_orig = x_orig self._y_orig = y_orig self._updateXY() @property def has_null_proj(self):", "/ self.dx j = (py - self.y_orig) / self.dy if int_index: if int_index", "j <-> x, y conversion, etc.\"\"\" def i2x(self, *args): \"\"\"Convert i, j, ...", "axis=1) x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2) y_points = 
np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2)", "i = np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan)", "__init__(self, X, Y): X = np.array(X) Y = np.array(Y) if X.ndim == 1:", "x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs): self.proj = proj self._reset_raw_xy()", "return lon, lat @property def nx(self): return self.ntheta @property def ny(self): return self.nr", "else: self._raise_invalid_shape(self.X, y) @property def CX(self): return self._CX @property def CY(self): return self._CY", "np.deg2rad(self.lon0), np.deg2rad(self.lat0) dlon = lon2 - lon1 dlat = lat2 - lat1 sin_dlon", "True])).reshape(-1, 2) i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0]", "rmin self.rmax = rmax self.nr = nr self.ntheta = ntheta self.theta0 = theta0", "self.dtheta % self.ntheta j = (r - self.rmin) / self.dr if int_index: i" ]
[ "= '1' try: from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version", "from importlib_metadata import version except: from importlib.metadata import version __version__ = version(__name__) del", "'1' try: from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except", "print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from", "{__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from .config import Configuration", "except: from importlib.metadata import version __version__ = version(__name__) del version print (f'(Running UniTVelo", "run_model from .config import Configuration from .eval_utils import evaluate from .gene_influence import influence", "time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import get_version __version__", "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del", "version __version__ = version(__name__) del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\",", "version(__name__) del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main", "ImportError): try: from importlib_metadata import version except: from importlib.metadata import version __version__ =", "version except: from importlib.metadata import version __version__ = version(__name__) del version print (f'(Running", "<filename>unitvelo/__init__.py #%% import os from time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try:", "from .main import run_model from .config import Configuration from .eval_utils import evaluate from", "from time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' 
try: from setuptools_scm import get_version", "del get_version except (LookupError, ImportError): try: from importlib_metadata import version except: from importlib.metadata", "print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from .config import Configuration from", "import run_model from .config import Configuration from .eval_utils import evaluate from .gene_influence import", "(f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from .config", "get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError): try: from importlib_metadata import version except:", "(LookupError, ImportError): try: from importlib_metadata import version except: from importlib.metadata import version __version__", "gmtime())) from .main import run_model from .config import Configuration from .eval_utils import evaluate", "from importlib.metadata import version __version__ = version(__name__) del version print (f'(Running UniTVelo {__version__})')", "import version __version__ = version(__name__) del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d", "gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import get_version __version__ = get_version(root=\"..\",", "UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from .config import", "setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError): try:", "__version__ = version(__name__) del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))", "from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError):", "= get_version(root=\"..\", relative_to=__file__) del get_version except 
(LookupError, ImportError): try: from importlib_metadata import version", "except (LookupError, ImportError): try: from importlib_metadata import version except: from importlib.metadata import version", "importlib_metadata import version except: from importlib.metadata import version __version__ = version(__name__) del version", "(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model from .config import Configuration from .eval_utils", "get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError): try: from importlib_metadata", "import version except: from importlib.metadata import version __version__ = version(__name__) del version print", "os from time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import", "import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError): try: from", "try: from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError,", "__version__ = get_version(root=\"..\", relative_to=__file__) del get_version except (LookupError, ImportError): try: from importlib_metadata import", "try: from importlib_metadata import version except: from importlib.metadata import version __version__ = version(__name__)", "version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import run_model", "#%% import os from time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from", "importlib.metadata import version __version__ = version(__name__) del version print (f'(Running UniTVelo {__version__})') print", "import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import get_version __version__ =", ".main import run_model from .config import Configuration from .eval_utils import evaluate from 
.gene_influence", "= version(__name__) del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from", "del version print (f'(Running UniTVelo {__version__})') print (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())) from .main import", "import os from time import gmtime, strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm", "%H:%M:%S\", gmtime())) from .main import run_model from .config import Configuration from .eval_utils import", "strftime os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' try: from setuptools_scm import get_version __version__ = get_version(root=\"..\", relative_to=__file__)", "get_version except (LookupError, ImportError): try: from importlib_metadata import version except: from importlib.metadata import", "relative_to=__file__) del get_version except (LookupError, ImportError): try: from importlib_metadata import version except: from" ]
[]
[ "\"0\": [30, 29, 28, 27, 26, 25, 24, 16, 17, 18, 19, 20,", "2: 21, 3: 22, 4: 23, 5: 24, 6: 25, 7: 26}, \"6\":", "22, 4: 23, 5: 24, 6: 25, 7: 26}, \"6\": {0: 10, 1:", "-1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\",", "13, 14, 15], }, \"1\": { \"0\": [30, 29, 28, 27, 26, 25,", "18, 3: 19, 4: 20, 5: 21, 6: 22, 7: 23, } },", "20, 21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\":", "20, 2: 21, 3: 22, 4: 23, 5: 24, 6: 25, 7: 26},", "13, 4: 14, 5: 15, 6: 16, 7: 17}, }, \"3\": {\"5\": {0:", "self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual(", "20, 5: 21, 6: 22, 7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10)", "25, 24, 16, 17, 18, 19, 20, 21, 22, 23] }, }, )", "3: 19, 4: 20, 5: 21, 6: 22, 7: 23, } }, },", ") self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23)", "22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19,", "HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config =", "HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7,", "6, 7], \"18\": [8, 9, 10, 11, 
12, 13, 14, 15], }, \"1\":", "self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0,", "1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15)", "import List from bidict import bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def", "22, 7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25)", "6: 22, 7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB,", "0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14)", "1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1),", "26, 25, 24, 16, 17, 18, 19, 20, 21, 22, 23] }, },", "channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\",", ") 
self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1: 20, 2: 21,", "7], \"18\": [8, 9, 10, 11, 12, 13, 14, 15], }, \"1\": {", "17, 18, 19, 20, 21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, {", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def", "-1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1),", "1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7:", "mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9", "9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\", mcs=\"10\"), -3", "12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0)", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16,", "30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def 
test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"),", "= json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\":", "{0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6:", "16, 7: 17}, }, \"3\": {\"5\": {0: 11, 1: 12, 2: 13, 3:", "self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def", "11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17,", "17, 2: 18, 3: 19, 4: 20, 5: 21, 6: 22, 7: 23,", "1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1),", "channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\",", "self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual(", "15], }, \"1\": { \"0\": [30, 29, 28, 27, 26, 25, 24, 16,", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16,", "self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) 
self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) ->", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30,", "-1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1),", "}, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12)", "23, 5: 24, 6: 25, 7: 26}, \"6\": {0: 10, 1: 11, 2:", "7: 26}, \"6\": {0: 10, 1: 11, 2: 12, 3: 13, 4: 14,", "14, 4: 15, 5: 16, 6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": {", "HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config)", "29, 28, 27, 26, 25, 24, 16, 17, 18, 19, 20, 21, 22,", "[30, 29, 28, 27, 26, 25, 24, 16, 17, 18, 19, 20, 21,", "self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1),", "60) def test_get_pwr_offset(self) -> None: 
self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9", "7: 17}, }, \"3\": {\"5\": {0: 11, 1: 12, 2: 13, 3: 14,", "6: 16, 7: 17}, }, \"3\": {\"5\": {0: 11, 1: 12, 2: 13,", "Facebook. All Rights Reserved. import json import unittest from typing import List from", "16, 6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1: 17,", "15, 6: 16, 7: 17}, }, \"3\": {\"5\": {0: 11, 1: 12, 2:", "-1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1),", "25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1),", "22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60)", "from typing import List from bidict import bidict from scan_service.utils.hardware_config import HardwareConfig class", "-> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2, 3, 4,", "12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18}},", "26}, \"6\": {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5:", "List from bidict import bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self)", "27, 26, 25, 24, 16, 17, 18, 19, 20, 21, 22, 23] },", "17}, }, \"3\": {\"5\": {0: 11, 1: 12, 2: 13, 3: 14, 4:", "{ \"default_mcs\": { 0: 16, 1: 17, 2: 18, 3: 19, 
4: 20,", "-> None: with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) ->", "self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\", mcs=\"10\"), -3 ) self.assertEqual(HardwareConfig.get_pwr_offset(target_pwr_idx=5),", "self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2, 3, 4, 5, 6,", "12, 13, 14, 15], }, \"1\": { \"0\": [30, 29, 28, 27, 26,", "24, 16, 17, 18, 19, 20, 21, 22, 23] }, }, ) self.assertDictEqual(", "23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60)", "3: 22, 4: 23, 5: 24, 6: 25, 7: 26}, \"6\": {0: 10,", "24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30)", "\"3\": {\"5\": {0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5:", "2: 18, 3: 19, 4: 20, 5: 21, 6: 22, 7: 23, }", "14, 5: 15, 6: 16, 7: 17}, }, \"3\": {\"5\": {0: 11, 1:", "self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"),", "3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0,", "0: 16, 
1: 17, 2: 18, 3: 19, 4: 20, 5: 21, 6:", "from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as", "{ \"0\": { \"-18\": [0, 1, 2, 3, 4, 5, 6, 7], \"18\":", "{0: 19, 1: 20, 2: 21, 3: 22, 4: 23, 5: 24, 6:", "1: 17, 2: 18, 3: 19, 4: 20, 5: 21, 6: 22, 7:", "3: 13, 4: 14, 5: 15, 6: 16, 7: 17}, }, \"3\": {\"5\":", "1, 2, 3, 4, 5, 6, 7], \"18\": [8, 9, 10, 11, 12,", "16, 17, 18, 19, 20, 21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER,", "13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18}}, \"default_channel\": {", "import json import unittest from typing import List from bidict import bidict from", "3, 4, 5, 6, 7], \"18\": [8, 9, 10, 11, 12, 13, 14,", "{ \"-18\": [0, 1, 2, 3, 4, 5, 6, 7], \"18\": [8, 9,", "{ \"2\": { \"10\": {0: 19, 1: 20, 2: 21, 3: 22, 4:", "2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18}}, \"default_channel\":", "self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1: 20, 2: 21, 3:", "17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29)", "json import unittest from typing import List from bidict import bidict from scan_service.utils.hardware_config", "[0, 1, 2, 3, 4, 5, 6, 7], \"18\": [8, 9, 10, 11,", "\"18\": [8, 9, 10, 11, 12, 13, 14, 15], }, \"1\": { \"0\":", "19, 1: 20, 2: 21, 3: 22, 4: 23, 5: 24, 6: 25,", "def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2,", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) 
self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15,", "19, 20, 21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": {", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23,", "1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual(", "7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1: 17, 2: 18, 3:", "29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) ->", "mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\", mcs=\"10\"),", "23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3)", "HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1,", "23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1),", "21, 6: 22, 7: 23, } }, }, ) 
self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10)", "def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def", "# Copyright 2004-present Facebook. All Rights Reserved. import json import unittest from typing", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None:", "11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17},", "def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9)", "test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8,", "import bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with", "\"2\": { \"10\": {0: 19, 1: 20, 2: 21, 3: 22, 4: 23,", "1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1),", "None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\",", "from bidict import bidict from 
scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) ->", "} }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB,", "{ \"10\": {0: 19, 1: 20, 2: 21, 3: 22, 4: 23, 5:", "self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1)", "{ 0: 16, 1: 17, 2: 18, 3: 19, 4: 20, 5: 21,", "scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f:", "5: 16, 6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1:", "6: 25, 7: 26}, \"6\": {0: 10, 1: 11, 2: 12, 3: 13,", "HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\", mcs=\"10\"), -3 ) self.assertEqual(HardwareConfig.get_pwr_offset(target_pwr_idx=5), -2)", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15,", "1: 20, 2: 21, 3: 22, 4: 23, 5: 24, 6: 25, 7:", "\"default_channel\": { \"default_mcs\": { 0: 16, 1: 17, 2: 18, 3: 19, 4:", "1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7:", "}, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1: 20, 2:", "-1), 24) 
self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1),", "All Rights Reserved. import json import unittest from typing import List from bidict", "[8, 9, 10, 11, 12, 13, 14, 15], }, \"1\": { \"0\": [30,", "24, 6: 25, 7: 26}, \"6\": {0: 10, 1: 11, 2: 12, 3:", "HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1: 20, 2: 21, 3: 22,", "python3 # Copyright 2004-present Facebook. All Rights Reserved. import json import unittest from", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\",", "Copyright 2004-present Facebook. All Rights Reserved. import json import unittest from typing import", "import unittest from typing import List from bidict import bidict from scan_service.utils.hardware_config import", "9, 10, 11, 12, 13, 14, 15], }, \"1\": { \"0\": [30, 29,", "12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17}, }, \"3\":", "7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX,", "typing import List from bidict import bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase):", "10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16,", "}, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) 
self.assertEqual(HardwareConfig.MAX_POWER,", "5: 24, 6: 25, 7: 26}, \"6\": {0: 10, 1: 11, 2: 12,", "1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self)", "15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23)", "-1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"),", "6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1: 17, 2:", "16, 1: 17, 2: 18, 3: 19, 4: 20, 5: 21, 6: 22,", "HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5,", "10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self)", "2, 3, 4, 5, 6, 7], \"18\": [8, 9, 10, 11, 12, 13,", "#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import json import unittest", "Reserved. 
import json import unittest from typing import List from bidict import bidict", "25, 7: 26}, \"6\": {0: 10, 1: 11, 2: 12, 3: 13, 4:", ") self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel=\"2\", mcs=\"10\"), -3 )", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0)", "HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2, 3, 4, 5, 6, 7],", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23,", "9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17)", "as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, {", "23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1:", "f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\":", "None: with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None:", "\"10\": {0: 19, 1: 20, 2: 21, 3: 22, 4: 23, 5: 24,", "3: 14, 4: 15, 5: 16, 6: 17, 7: 18}}, 
\"default_channel\": { \"default_mcs\":", "5: 21, 6: 22, 7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10) self.assertEqual(HardwareConfig.MINIMUM_SNR_DB,", "2004-present Facebook. All Rights Reserved. import json import unittest from typing import List", "-> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4,", "4: 20, 5: 21, 6: 22, 7: 23, } }, }, ) self.assertEqual(HardwareConfig.BORESIDE_BW_IDX,", "5: 15, 6: 16, 7: 17}, }, \"3\": {\"5\": {0: 11, 1: 12,", "setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self)", "json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0,", "}, \"1\": { \"0\": [30, 29, 28, 27, 26, 25, 24, 16, 17,", "2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17}, },", "4, 5, 6, 7], \"18\": [8, 9, 10, 11, 12, 13, 14, 15],", "-1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1),", "mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"),", "unittest from typing import List from bidict import bidict from scan_service.utils.hardware_config import HardwareConfig", "5, 6, 7], \"18\": [8, 9, 10, 11, 12, 13, 14, 15], },", "class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config = 
json.load(f)", "with open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual(", "1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1),", "test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual(", "-9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7", "def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 )", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60,", "8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24)", "18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1: 17, 2: 18, 3: 19,", "60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60) def test_get_pwr_offset(self) -> None: self.assertEqual(HardwareConfig.get_pwr_offset(channel=\"2\", 
mcs=\"6\"), 0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4,", "{\"5\": {0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16,", "28, 27, 26, 25, 24, 16, 17, 18, 19, 20, 21, 22, 23]", "14, 15], }, \"1\": { \"0\": [30, 29, 28, 27, 26, 25, 24,", "21, 3: 22, 4: 23, 5: 24, 6: 25, 7: 26}, \"6\": {0:", "\"1\": { \"0\": [30, 29, 28, 27, 26, 25, 24, 16, 17, 18,", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30,", "self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29) self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60) self.assertEqual(HardwareConfig.get_adjacent_beam_index(60,", "test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2, 3,", "\"-18\": [0, 1, 2, 3, 4, 5, 6, 7], \"18\": [8, 9, 10,", "4: 15, 5: 16, 6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0:", "11, 12, 13, 14, 15], }, \"1\": { \"0\": [30, 29, 28, 27,", "self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8,", ") self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel=\"3\", mcs=\"5\"), 7 )", "bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with 
open(\"tests/hardware_config.json\")", "10, 11, 12, 13, 14, 15], }, \"1\": { \"0\": [30, 29, 28,", "15, 5: 16, 6: 17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16,", "18, 19, 20, 21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\":", "None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8)", "17, 7: 18}}, \"default_channel\": { \"default_mcs\": { 0: 16, 1: 17, 2: 18,", "4: 14, 5: 15, 6: 16, 7: 17}, }, \"3\": {\"5\": {0: 11,", "{0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6:", "Rights Reserved. import json import unittest from typing import List from bidict import", "1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8) self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1),", "None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": { \"-18\": [0, 1, 2, 3, 4, 5,", "-10) self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25) self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3) self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12) self.assertEqual(HardwareConfig.MAX_POWER, 23) def test_get_adjacent_beam_index(self) -> None:", "0) self.assertEqual( HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel=\"2\", mcs=\"6\"), -9 ) self.assertEqual( HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel=\"2\", mcs=\"6\"), 9 )", "}, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0: 19, 1: 20,", "-> None: self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1) self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0) self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9) 
self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1),", "21, 22, 23] }, }, ) self.assertDictEqual( HardwareConfig.TXPOWERIDX_TO_TXPOWER, { \"2\": { \"10\": {0:", "{ \"0\": [30, 29, 28, 27, 26, 25, 24, 16, 17, 18, 19,", "\"6\": {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15,", "\"default_mcs\": { 0: 16, 1: 17, 2: 18, 3: 19, 4: 20, 5:", "19, 4: 20, 5: 21, 6: 22, 7: 23, } }, }, )", "14) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17) self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23) self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22)", "import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None: with open(\"tests/hardware_config.json\") as f: hardware_config", "}, \"3\": {\"5\": {0: 11, 1: 12, 2: 13, 3: 14, 4: 15,", "hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER, { \"0\": {", "\"0\": { \"-18\": [0, 1, 2, 3, 4, 5, 6, 7], \"18\": [8,", "bidict import bidict from scan_service.utils.hardware_config import HardwareConfig class HardwareConfigTests(unittest.TestCase): def setUp(self) -> None:", "4: 23, 5: 24, 6: 25, 7: 26}, \"6\": {0: 10, 1: 11,", "open(\"tests/hardware_config.json\") as f: hardware_config = json.load(f) HardwareConfig.set_config(hardware_config) def test_class_variables(self) -> None: self.assertDictEqual( HardwareConfig.BEAM_ORDER," ]
[ "variable # all print are commented #region capture Calm variables username = \"@@{eip_username}@@\"", "commented #region capture Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server =", "that the subnet cannot contains others subnets as children #endregion # region prepare", "call to {}\".format(method, url)) resp = process_request(url, method, headers) # parsing the response", "subnet cannot contains others subnets as children #endregion # region prepare api call", "print(\"Request failed\") exit(1) return r #endregion #region main processing # make the api", "subnets = json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) #", "return r #endregion #region main processing # make the api call url =", "subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic variable input print(\",", "is used to retreive a list of available subnets on EIP # this", "'application/json'} # endregion #region API call function def process_request(url, method, headers, payload=None): if", "process_request(url, method, headers, payload=None): if (payload is not None): payload = json.dumps(payload) r", "subnets on EIP # this list is provided during at the application launch", "api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API", "\"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the", "description: Get available networks attached to a site on EfficientIP # input vars:", "the application launch using dynaminy variable # all print are commented #region capture", "launch using dynaminy variable # all print are commented #region capture Calm variables", "EfficientIP # input vars: 
eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion # this", "parsing the response subnets_list = [] subnets = json.loads(resp.content) for subnet in subnets:", "\"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\"", "resp = process_request(url, method, headers) # parsing the response subnets_list = [] subnets", "# this list is provided during at the application launch using dynaminy variable", "vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion # this script is used", "{'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API call function def process_request(url, method,", "min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet cannot contains others", "= \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call to {}\".format(method,", "json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not", "[] subnets = json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name']))", "params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1) return r #endregion #region", "= \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip =", "EipGetSubnets # description: Get available networks attached to a site on EfficientIP #", "on EIP # this list is provided during at the application launch using", "are commented #region capture Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server", "api_server_endpoint = \"/rest\" method = \"GET\" 
base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers =", "method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept':", "the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {}", "a site on EfficientIP # input vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists", "capture Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name", "input vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion # this script is", "EIP # this list is provided during at the application launch using dynaminy", "vars: subnet_lists #endregion # this script is used to retreive a list of", "api_server_port = \"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port,", "\"parent_site_name\", site_name) #print(\"Making a {} API call to {}\".format(method, url)) resp = process_request(url,", "API call function def process_request(url, method, headers, payload=None): if (payload is not None):", "= \"1\" #means that the subnet cannot contains others subnets as children #endregion", "payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers)", "headers=headers) if not r.ok: print(\"Request failed\") exit(1) return r #endregion #region main processing", "= \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that", "call api_server_port = \"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server,", "exit(1) return r #endregion #region main processing # make the 
api call url", "method, headers) # parsing the response subnets_list = [] subnets = json.loads(resp.content) for", "to retreive a list of available subnets on EIP # this list is", "of available subnets on EIP # this list is provided during at the", "#region headers # * authors: <EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets", "is not None): payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password,", "processing # make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name)", "a {} API call to {}\".format(method, url)) resp = process_request(url, method, headers) #", "provided during at the application launch using dynaminy variable # all print are", "password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal", "<PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\"", "json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array", "payload=None): if (payload is not None): payload = json.dumps(payload) r = urlreq(url, verb=method,", "method, headers, payload=None): if (payload is not None): payload = json.dumps(payload) r =", "to {}\".format(method, url)) resp = process_request(url, method, headers) # parsing the response subnets_list", "prepare api call api_server_port = \"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url", "networks attached to a site on EfficientIP # input vars: eip_site_name, eip_min_free_ip #", "# output vars: subnet_lists #endregion # this script is used to retreive a", "#endregion #region main processing # make the api call url = 
\"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\",", "available networks attached to a site on EfficientIP # input vars: eip_site_name, eip_min_free_ip", "variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\"", "contains others subnets as children #endregion # region prepare api call api_server_port =", "#endregion # region prepare api call api_server_port = \"443\" api_server_endpoint = \"/rest\" method", "= [] subnets = json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip):", "r.ok: print(\"Request failed\") exit(1) return r #endregion #region main processing # make the", "eip_min_free_ip # output vars: subnet_lists #endregion # this script is used to retreive", "# parsing the response subnets_list = [] subnets = json.loads(resp.content) for subnet in", "output vars: subnet_lists #endregion # this script is used to retreive a list", "# endregion #region API call function def process_request(url, method, headers, payload=None): if (payload", "#region main processing # make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal,", "r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok:", "= json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if", "if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic variable input", "#endregion # this script is used to retreive a list of available subnets", "function def process_request(url, method, headers, payload=None): if (payload is not None): payload =", "# all print are commented #region capture Calm variables username = 
\"@@{eip_username}@@\" password", "#region capture Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\"", "site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet", "base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion", "available subnets on EIP # this list is provided during at the application", "{} API call to {}\".format(method, url)) resp = process_request(url, method, headers) # parsing", "dynaminy variable # all print are commented #region capture Calm variables username =", "auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1) return", "int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic variable input print(\", \".join(subnets_list)) #endregion", "!= int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic variable input print(\", \".join(subnets_list))", "username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip", "task_name: EipGetSubnets # description: Get available networks attached to a site on EfficientIP", "process_request(url, method, headers) # parsing the response subnets_list = [] subnets = json.loads(resp.content)", "the subnet cannot contains others subnets as children #endregion # region prepare api", "= urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request", "\"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} #", "site_name) 
#print(\"Making a {} API call to {}\".format(method, url)) resp = process_request(url, method,", "on EfficientIP # input vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion #", "date: 30/03/2020 # task_name: EipGetSubnets # description: Get available networks attached to a", "<EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets # description: Get available networks", "# description: Get available networks attached to a site on EfficientIP # input", "others subnets as children #endregion # region prepare api call api_server_port = \"443\"", "= \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}", "= process_request(url, method, headers) # parsing the response subnets_list = [] subnets =", "# region prepare api call api_server_port = \"443\" api_server_endpoint = \"/rest\" method =", "this script is used to retreive a list of available subnets on EIP", "r #endregion #region main processing # make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url,", "verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1)", "# input vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion # this script", "in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic", "urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\")", "# * authors: <EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets # description:", "using dynaminy variable # all print are commented #region capture Calm variables username", "is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call to 
{}\".format(method, url)) resp =", "print are commented #region capture Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\"", "region prepare api call api_server_port = \"443\" api_server_endpoint = \"/rest\" method = \"GET\"", "attached to a site on EfficientIP # input vars: eip_site_name, eip_min_free_ip # output", "this list is provided during at the application launch using dynaminy variable #", "\"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call to {}\".format(method, url)) resp", "a list of available subnets on EIP # this list is provided during", "subnets as children #endregion # region prepare api call api_server_port = \"443\" api_server_endpoint", "#means that the subnet cannot contains others subnets as children #endregion # region", "cannot contains others subnets as children #endregion # region prepare api call api_server_port", "passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1) return r #endregion", "authors: <EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets # description: Get available", "call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call", "children #endregion # region prepare api call api_server_port = \"443\" api_server_endpoint = \"/rest\"", "= <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name = \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal =", "= {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API call function def process_request(url,", "\"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json',", "is_terminal = \"1\" #means that the subnet cannot contains others subnets as children", "(payload is not 
None): payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username,", "= \"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet cannot", "api call api_server_port = \"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url =", "{}\".format(method, url)) resp = process_request(url, method, headers) # parsing the response subnets_list =", "for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use", "is provided during at the application launch using dynaminy variable # all print", "used to retreive a list of available subnets on EIP # this list", "# make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making", "if (payload is not None): payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC',", "<reponame>mlavi/blueprints #region headers # * authors: <EMAIL> # * date: 30/03/2020 # task_name:", "\"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet cannot contains others subnets as", "headers) # parsing the response subnets_list = [] subnets = json.loads(resp.content) for subnet", "30/03/2020 # task_name: EipGetSubnets # description: Get available networks attached to a site", "to a site on EfficientIP # input vars: eip_site_name, eip_min_free_ip # output vars:", "# this script is used to retreive a list of available subnets on", "failed\") exit(1) return r #endregion #region main processing # make the api call", "site on EfficientIP # input vars: eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion", "main processing # make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\",", "api_server = \"@@{eip_endpoint}@@\" site_name = 
\"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means", "= \"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint)", "= \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region", "url)) resp = process_request(url, method, headers) # parsing the response subnets_list = []", "retreive a list of available subnets on EIP # this list is provided", "# task_name: EipGetSubnets # description: Get available networks attached to a site on", "the response subnets_list = [] subnets = json.loads(resp.content) for subnet in subnets: if", "= \"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type':", "subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for dynamic variable", "all print are commented #region capture Calm variables username = \"@@{eip_username}@@\" password =", "api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API call", "Calm variables username = \"@@{eip_username}@@\" password = <PASSWORD>}@@\" api_server = \"@@{eip_endpoint}@@\" site_name =", "user=username, passwd=password, params=payload, verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1) return r", "# * date: 30/03/2020 # task_name: EipGetSubnets # description: Get available networks attached", "script is used to retreive a list of available subnets on EIP #", "headers, payload=None): if (payload is not None): payload = json.dumps(payload) r = urlreq(url,", "* authors: <EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets # description: Get", "application launch using dynaminy variable # all 
print are commented #region capture Calm", "during at the application launch using dynaminy variable # all print are commented", "list is provided during at the application launch using dynaminy variable # all", "subnet_lists #endregion # this script is used to retreive a list of available", "not None): payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload,", "None): payload = json.dumps(payload) r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False,", "def process_request(url, method, headers, payload=None): if (payload is not None): payload = json.dumps(payload)", "\"@@{eip_site_name}@@\" min_free_ip = \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet cannot contains", "at the application launch using dynaminy variable # all print are commented #region", "#print(\"Making a {} API call to {}\".format(method, url)) resp = process_request(url, method, headers)", "list of available subnets on EIP # this list is provided during at", "not r.ok: print(\"Request failed\") exit(1) return r #endregion #region main processing # make", "api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API call function", "Get available networks attached to a site on EfficientIP # input vars: eip_site_name,", "verify=False, headers=headers) if not r.ok: print(\"Request failed\") exit(1) return r #endregion #region main", "\"443\" api_server_endpoint = \"/rest\" method = \"GET\" base_url = \"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers", "eip_site_name, eip_min_free_ip # output vars: subnet_lists #endregion # this script is used to", "\"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call to {}\".format(method, url))", "'Accept': 
'application/json'} # endregion #region API call function def process_request(url, method, headers, payload=None):", "API call to {}\".format(method, url)) resp = process_request(url, method, headers) # parsing the", "subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return array use for", "* date: 30/03/2020 # task_name: EipGetSubnets # description: Get available networks attached to", "\"1\" #means that the subnet cannot contains others subnets as children #endregion #", "subnets_list = [] subnets = json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] !=", "headers # * authors: <EMAIL> # * date: 30/03/2020 # task_name: EipGetSubnets #", "\"https://{}:{}{}\".format(api_server, api_server_port, api_server_endpoint) headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API", "headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} # endregion #region API call function def", "if not r.ok: print(\"Request failed\") exit(1) return r #endregion #region main processing #", "url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a {} API call to", "'application/json', 'Accept': 'application/json'} # endregion #region API call function def process_request(url, method, headers,", "as children #endregion # region prepare api call api_server_port = \"443\" api_server_endpoint =", "= json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size'] != int(min_free_ip): subnets_list.append(format(subnet['subnet_name'])) # return", "= \"@@{eip_min_free_ip}@@\" is_terminal = \"1\" #means that the subnet cannot contains others subnets", "response subnets_list = [] subnets = json.loads(resp.content) for subnet in subnets: if subnet['subnet_ip_free_size']", "call function def process_request(url, 
method, headers, payload=None): if (payload is not None): payload", "endregion #region API call function def process_request(url, method, headers, payload=None): if (payload is", "make the api call url = \"{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'\".format(base_url, \"is_terminal\", is_terminal, \"parent_site_name\", site_name) #print(\"Making a", "#region API call function def process_request(url, method, headers, payload=None): if (payload is not" ]
[ "\"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\", query=query_dict, headers=header_dict", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import", "fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET", "= d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is successful. Mocked", "the DataONE project, and is # jointly copyrighted by participating institutions in DataONE.", "\"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against", "and is # jointly copyrighted by participating institutions in DataONE. For # more", "this file except in compliance with the License. 
# You may obtain a", "logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s =", "with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful", "response = self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self):", "_post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return", "\"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes)", "2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the \"License\");", "line retains query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict", "= d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST", "more information on DataONE, see our web site at http://dataone.org. # # Copyright", "ANY KIND, either express or implied. 
# See the License for the specific", "combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict =", "and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\":", "header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self,", ") return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid],", "a2_hash assert c_hash == c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful", "fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is successful. Mocked GET returns object bytes", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "a_hash == a2_hash assert c_hash == c1_hash assert c_hash == c2_hash @responses.activate def", "response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict =", "test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404", "params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({},", "header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML", "institutions in DataONE. For # more information on DataONE, see our web site", "and # limitations under the License. import hashlib import logging import freezegun import", "the License. import hashlib import logging import freezegun import pytest import requests import", "site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import requests.exceptions import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import", "= self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash", "test_1000(self): \"\"\"HTTP GET is successful. Mocked GET returns object bytes uniquely tied to", "assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\",", "c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response", "DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE #", "OF ANY KIND, either express or implied. 
# See the License for the", "query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict)", "\"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line(", "d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class", "s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None):", "POST is successful Roundtrip for body, headers and query params.\"\"\" body_bytes = b\"test_body\"", "on DataONE, see our web site at http://dataone.org. 
# # Copyright 2009-2019 DataONE", "header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed", "assert c_hash == c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP", "query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s", "header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict =", "d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict,", "self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert b_hash", "@responses.activate def test_1000(self): \"\"\"HTTP GET is successful. Mocked GET returns object bytes uniquely", "header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\",", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "\"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\")", "\"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed to Session() and individual POST are", "<filename>lib_client/src/d1_client/tests/test_session.py<gh_stars>10-100 #!/usr/bin/env python # This work was created by participants in the DataONE", "to Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict", "self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert 
response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP", "response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains query parameters and", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str =", "permissions and # limitations under the License. import hashlib import logging import freezegun", "import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "specific language governing permissions and # limitations under the License. import hashlib import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200", "participating institutions in DataONE. For # more information on DataONE, see our web", "governing permissions and # limitations under the License. 
import hashlib import logging import", "d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is", "d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\":", "@freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\",", "assert b_hash != c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash =", "@responses.activate def test_1070(self): \"\"\"cURL command line retains query parameters and headers.\"\"\" query_dict =", "s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"],", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict,", "= self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert b_hash != c_hash", "s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s =", "required by applicable law or agreed to in writing, software # distributed under", "return 
s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return", "applicable law or agreed to in writing, software # distributed under the License", "c_hash == c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET", "test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body, headers and query params.\"\"\" body_bytes", "@responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert", "@responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body, headers and query", "created by participants in the DataONE project, and is # jointly copyrighted by", "s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is successful. Mocked GET returns object", "import freezegun import pytest import requests import requests.exceptions import responses import d1_common.logging_context import", "or agreed to in writing, software # distributed under the License is distributed", "= self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert", "self._get_hash(c_pid) assert a_hash != b_hash assert b_hash != c_hash assert a_hash != c_hash", "\"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict,", "def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} )", "jointly copyrighted by participating institutions in DataONE. For # more information on DataONE,", "was created by participants in the DataONE project, and is # jointly copyrighted", "is successful Roundtrip for body, headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict", "ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self):", "r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains query", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "= s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "writing, software # distributed under the License is distributed on an \"AS IS\"", "import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\")", "s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body, headers and", "or {}) 
def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\":", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash == a2_hash assert c_hash == c1_hash", "d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is", "def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or", "= {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json()", "\"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\", query=query_dict, headers=header_dict ) d1_test.sample.assert_equals(curl_str,", "our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed", "compliance with the License. 
# You may obtain a copy of the License", "= self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response", "query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response", "\"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate", "freezegun import pytest import requests import requests.exceptions import responses import d1_common.logging_context import d1_client.session", "query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s", "c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash =", "This work was created by participants in the DataONE project, and is #", "logging import freezegun import pytest import requests import requests.exceptions import responses import d1_common.logging_context", "r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed to Session()", "for the specific language governing permissions and # limitations under the License. import", "{\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\", query=query_dict,", "project, and is # jointly copyrighted by participating institutions in DataONE. 
For #", "test_1050(self): \"\"\"Query params passed to Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"}", "def test_1050(self): \"\"\"Query params passed to Session() and individual POST are correctly combined.\"\"\"", "d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\":", "== a2_hash assert c_hash == c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self):", "OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET", "= b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\":", "response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed to Session() and individual", "returns object bytes uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid =", "requests.exceptions import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post", "assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\"", "self._get_hash(b_pid) 
c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert b_hash != c_hash assert", "body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form", "\"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL", "is successful. Mocked GET returns object bytes uniquely tied to given PID.\"\"\" a_pid", "successful. Mocked GET returns object bytes uniquely tied to given PID.\"\"\" a_pid =", "a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash == a2_hash assert c_hash", "_post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP", "not use this file except in compliance with the License. 
# You may", "import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self,", "class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid])", "test_1070(self): \"\"\"cURL command line retains query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\",", "import requests import requests.exceptions import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import", "b_hash assert b_hash != c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash", "== a1_hash assert a_hash == a2_hash assert c_hash == c1_hash assert c_hash ==", "License, Version 2.0 (the \"License\"); # you may not use this file except", "DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For", "= \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "under the License. 
import hashlib import logging import freezegun import pytest import requests", "def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger):", "POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\":", "participants in the DataONE project, and is # jointly copyrighted by participating institutions", "requests import requests.exceptions import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get", "GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def", "# you may not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code", "pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body, headers", "(the \"License\"); # you may not use this file except in compliance with", "response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self):", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "Mocked GET returns object bytes uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\"", "s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate 
def test_1000(self): \"\"\"HTTP GET is successful.", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import hashlib import logging import freezegun import pytest import requests import requests.exceptions import", "# limitations under the License. import hashlib import logging import freezegun import pytest", "header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict =", "pytest import requests import requests.exceptions import responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case", "file except in compliance with the License. # You may obtain a copy", "http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache License,", "for body, headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\",", "\"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate", "headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"}", "logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response", "\"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\", query=query_dict, headers=header_dict )", "404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s", "\"5432\"} response = 
self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def", "hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict", "d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "License for the specific language governing permissions and # limitations under the License.", "uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid =", "def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code ==", "to in writing, software # distributed under the License is distributed on an", "work was created by participants in the DataONE project, and is # jointly", "parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\",", "implied. 
# See the License for the specific language governing permissions and #", "b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert b_hash !=", "assert a_hash != b_hash assert b_hash != c_hash assert a_hash != c_hash a1_hash", "\"License\"); # you may not use this file except in compliance with the", "data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def", "a_hash == a1_hash assert a_hash == a2_hash assert c_hash == c1_hash assert c_hash", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is successful. Mocked GET", "{\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate", "= logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "DataONE. 
For # more information on DataONE, see our web site at http://dataone.org.", "d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid,", "b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash", "import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger", "DataONE # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "GET returns object bytes uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid", "s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict, header_dict,", "self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash ==", "or implied. # See the License for the specific language governing permissions and", "d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s", "\"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\")", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response =", "response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\"", "response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command", "\"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "and query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response", "in writing, software # distributed under the License is distributed on an \"AS", "GET is successful. 
Mocked GET returns object bytes uniquely tied to given PID.\"\"\"", "\"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def", "correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict", "python # This work was created by participants in the DataONE project, and", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"\"\"Query params passed to Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes", "def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body, headers and query params.\"\"\"", "\"\"\"HTTP POST is successful Roundtrip for body, headers and query params.\"\"\" body_bytes =", "= self._get_hash(c_pid) assert a_hash != b_hash assert b_hash != c_hash assert a_hash !=", "def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self):", "self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains", "bytes uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid", "#!/usr/bin/env python # This work was created by participants in the DataONE project,", "retains query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict =", "the Apache License, Version 2.0 (the 
\"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict", "PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid)", "assert a_hash == a1_hash assert a_hash == a2_hash assert c_hash == c1_hash assert", "in the DataONE project, and is # jointly copyrighted by participating institutions in", "see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # #", "by participating institutions in DataONE. For # more information on DataONE, see our", "s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\", query=query_dict, headers=header_dict ) d1_test.sample.assert_equals(curl_str, \"curl_command_line\")", "use this file except in compliance with the License. # You may obtain", "successful Roundtrip for body, headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict =", "self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params", "import pytest import requests import requests.exceptions import responses import d1_common.logging_context import d1_client.session import", "return s.POST([\"post\"], fields=fields_dict) @responses.activate def test_1000(self): \"\"\"HTTP GET is successful. 
Mocked GET returns", "= d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict, header_dict, body):", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "= self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query", "\"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid)", "\"\"\"Successful HTTP GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200", "License. import hashlib import logging import freezegun import pytest import requests import requests.exceptions", "params passed to Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes =", "= response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed to Session() and", "= b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes)", "200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP", "2.0 (the \"License\"); # you may not use this file except in compliance", "self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for", "c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid)", "@responses.activate def test_1050(self): \"\"\"Query params passed to Session() and individual POST are correctly", "= 
response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains query parameters", "copyrighted by participating institutions in DataONE. For # more information on DataONE, see", "against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\")", "by participants in the DataONE project, and is # jointly copyrighted by participating", "d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\")", "import logging import freezegun import pytest import requests import requests.exceptions import responses import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict)", "== c1_hash assert c_hash == c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns", "# # Unless required by applicable law or agreed to in writing, software", "_get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def", "self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response =", "express or implied. 
# See the License for the specific language governing permissions", "\"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash !=", "the specific language governing permissions and # limitations under the License. import hashlib", "at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache", "either express or implied. # See the License for the specific language governing", "\"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with", "import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase):", "Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the", "c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\")", "{\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict,", "raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def", "pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self,", "d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate 
def test_1050(self): \"\"\"Query params passed to Session() and individual POST", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response =", "HTTP GET returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate", "a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert a_hash != b_hash", "query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"],", "!= c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash", "self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash", "Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict =", "assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid)", "\"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s =", "logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for", "c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash", "= \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = 
self._get_hash(c_pid) assert a_hash", "# Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0", "a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash", "== c2_hash @responses.activate def test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response =", "the License. # You may obtain a copy of the License at #", "http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate", "\"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict,", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json()", "# more information on DataONE, see our web site at http://dataone.org. 
# #", "@responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET", "d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip", "= self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert", "and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\":", "return s.GET([\"object\", pid], headers=header_dict or {}) def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s", "body, headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\":", "= {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response =", "body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\",", "object bytes uniquely tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\"", "body_bytes = b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, 
header_dict,", "!= c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash =", "c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid) c_hash = self._get_hash(c_pid) assert", "test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response", "= {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\")", "with the License. # You may obtain a copy of the License at", "404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate", "{\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict,", "c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash ==", "def test_1070(self): \"\"\"cURL command line retains query parameters and headers.\"\"\" query_dict = {\"abcd\":", "= {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) curl_str = s.get_curl_command_line( \"POST\", \"http://some.bogus.address\",", "given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash =", "response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def", "def test_1000(self): \"\"\"HTTP GET is successful. 
Mocked GET returns object bytes uniquely tied", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "b_hash != c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid)", "\"\"\"HTTP GET is successful. Mocked GET returns object bytes uniquely tied to given", "= d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "@responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with", "test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR)", "response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises", "pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {}) def", "with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def test_1040(self): \"\"\"HTTP POST is successful Roundtrip for body,", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "= self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self):", "to given 
PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash", "d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger =", "= {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s =", "body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip\") @responses.activate def test_1050(self): \"\"\"Query params passed to", "# # Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version", "= self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash == a2_hash", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "hashlib import logging import freezegun import pytest import requests import requests.exceptions import responses", "Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict =", "!= b_hash assert b_hash != c_hash assert a_hash != c_hash a1_hash = self._get_hash(a_pid)", "query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\":", "c_hash = self._get_hash(c_pid) assert a_hash != b_hash assert b_hash != c_hash assert a_hash", "pid], headers=header_dict or {}) def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(", "def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\": \"5678\"}", "s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError): s.GET(\"/\") @responses.activate def 
test_1040(self): \"\"\"HTTP", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash = self._get_hash(b_pid)", "self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "== 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/ raises ConnectionError.\"\"\"", "tied to given PID.\"\"\" a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\"", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "@d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response =", "self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash == a2_hash assert", "command line retains query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"}", "a_pid = \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash", "d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self, fields_dict):", "{}) def _post(self, query_dict, header_dict, 
body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"}", "See the License for the specific language governing permissions and # limitations under", "# This work was created by participants in the DataONE project, and is", "import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict", "d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains query parameters and headers.\"\"\"", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict = response.json()", "limitations under the License. 
import hashlib import logging import freezegun import pytest import", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "responses import d1_common.logging_context import d1_client.session import d1_test.d1_test_case import d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample", "\"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\",", "headers=header_dict, data=body) def _post_fields(self, fields_dict): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.POST([\"post\"], fields=fields_dict) @responses.activate", "\"9876\", \"mnop\": \"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\")", "response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s =", "s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return", "{\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict,", "b\"test_body\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"}", "== 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"})", "d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) 
@d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def _get_hash(self, pid):", "200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert", "= d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def _post_fields(self,", "assert a_hash == a2_hash assert c_hash == c1_hash assert c_hash == c2_hash @responses.activate", "@responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict = {\"post_data_1\": \"1234\", \"post_data_2\":", "{\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)", "GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\": \"404\"}) assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\")", "in DataONE. For # more information on DataONE, see our web site at", "# jointly copyrighted by participating institutions in DataONE. For # more information on", "\"5432\"} response = self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def", "assert response.status_code == 404 self.sample.assert_equals(response.text, \"get_404\") @responses.activate def test_1030(self): \"\"\"HTTP GET against http://some.bogus.address/", "a_hash != b_hash assert b_hash != c_hash assert a_hash != c_hash a1_hash =", "Roundtrip for body, headers and query params.\"\"\" body_bytes = b\"test_body\" header_dict = {\"ijkl\":", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. 
# You may obtain a copy of", "web site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under", "For # more information on DataONE, see our web site at http://dataone.org. #", "a1_hash assert a_hash == a2_hash assert c_hash == c1_hash assert c_hash == c2_hash", "headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\": \"5678\"} header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"}", "\"\"\"cURL command line retains query parameters and headers.\"\"\" query_dict = {\"abcd\": \"1234\", \"efgh\":", "= self._post(query_dict, header_dict, body_bytes) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip", "GET against http://some.bogus.address/ raises ConnectionError.\"\"\" s = d1_client.session.Session(\"http://some.bogus.address\") with d1_common.logging_context.LoggingContext(logger): logger.setLevel(logging.ERROR) with pytest.raises(requests.exceptions.ConnectionError):", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "_get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\", pid], headers=header_dict or {})", "= response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\" field_dict", "b\"test_body\" header_dict = {\"ijkl\": \"9876\", \"mnop\": \"5432\"} response = self._post({}, header_dict, body_bytes) r_dict", "= \"pid_hy7tf83453y498\" b_pid = \"pid_09y68gh73n60\" c_pid = \"pid_987i075058679589060\" a_hash = self._get_hash(a_pid) b_hash =", "passed to Session() and individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\"", "response.status_code == 200 @responses.activate def test_1020(self): \"\"\"HTTP GET 404.\"\"\" response = self._get_response(\"valid_pid\", header_dict={\"trigger\":", "= self._get_hash(a_pid) assert a_hash == a1_hash assert a_hash == a2_hash assert c_hash ==", "= self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line", "headers=header_dict or {}) def _post(self, query_dict, header_dict, body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL,", "d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body)", "language governing permissions and # limitations under the License. 
import hashlib import logging", "body): d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict,", "def _get_hash(self, pid): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest()", "\"post_data_2\": \"5678\"} response = self._post_fields(field_dict) r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_form_fields\") @responses.activate def test_1070(self):", "is # jointly copyrighted by participating institutions in DataONE. For # more information", "s = d1_client.session.Session( d1_test.d1_test_case.MOCK_MN_BASE_URL, query={\"default_query\": \"test\"} ) return s.POST([\"post\"], query=query_dict, headers=header_dict, data=body) def", "d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) response = s.GET([\"object\", pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s", "individual POST are correctly combined.\"\"\" d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) body_bytes = b\"test_body\" query_dict = {\"abcd\": \"1234\",", "d1_test.mock_api.get import d1_test.mock_api.post import d1_test.sample logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator(\"TestSession\") @freezegun.freeze_time(\"1945-01-02\") class TestSession(d1_test.d1_test_case.D1TestCase): def", "a1_hash = self._get_hash(a_pid) c1_hash = self._get_hash(c_pid) c2_hash = self._get_hash(c_pid) a2_hash = self._get_hash(a_pid) assert", "test_1010(self): \"\"\"Successful HTTP GET returns 200 OK.\"\"\" response = 
self._get_response(\"pid1\") assert response.status_code ==", "information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019", "r_dict = response.json() d1_test.sample.assert_equals(r_dict, \"post_roundtrip_query\") @responses.activate def test_1060(self): \"\"\"Roundtrip for HTML Form fields.\"\"\"", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "pid]) return hashlib.sha1(response.content).hexdigest() def _get_response(self, pid, header_dict=None): d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL) s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL) return s.GET([\"object\",", "\"post_roundtrip_form_fields\") @responses.activate def test_1070(self): \"\"\"cURL command line retains query parameters and headers.\"\"\" query_dict", "returns 200 OK.\"\"\" response = self._get_response(\"pid1\") assert response.status_code == 200 @responses.activate def test_1020(self):" ]
[ "s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self): \"\"\" Get general information", "\"\"\" class Environment(object): def __init__(self, name, actions, gamma): self.name = name self.actions =", "\"\"\" :param s: state :param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return", "a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self):", "self.gamma = gamma def get_state_dimension(self): return None def get_state_dtype(self): return None def get_state_magnitude(self):", "\"\"\" return 0.0, None, False def get_info(self): \"\"\" Get general information to be", "Environment(object): def __init__(self, name, actions, gamma): self.name = name self.actions = actions self.gamma", "None def get_state_magnitude(self): return None def get_initial_state(self): return None def step(self, s, a):", "saved on disk. \"\"\" return { 'name': self.name, 'actions': self.actions, 'gamma': self.gamma }", "actions, gamma): self.name = name self.actions = actions self.gamma = gamma def get_state_dimension(self):", "name, actions, gamma): self.name = name self.actions = actions self.gamma = gamma def", "get_initial_state(self): return None def step(self, s, a): \"\"\" :param s: state :param a:", "def get_state_dimension(self): return None def get_state_dtype(self): return None def get_state_magnitude(self): return None def", "self.name = name self.actions = actions self.gamma = gamma def get_state_dimension(self): return None", "\"\"\" Get general information to be saved on disk. 
\"\"\" return { 'name':", "= gamma def get_state_dimension(self): return None def get_state_dtype(self): return None def get_state_magnitude(self): return", "get_state_dimension(self): return None def get_state_dtype(self): return None def get_state_magnitude(self): return None def get_initial_state(self):", "def get_initial_state(self): return None def step(self, s, a): \"\"\" :param s: state :param", "gamma): self.name = name self.actions = actions self.gamma = gamma def get_state_dimension(self): return", "\"\"\" Abstract environment class \"\"\" class Environment(object): def __init__(self, name, actions, gamma): self.name", "def get_state_dtype(self): return None def get_state_magnitude(self): return None def get_initial_state(self): return None def", "is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self): \"\"\" Get general information to", "0.0, None, False def get_info(self): \"\"\" Get general information to be saved on", "return None def get_state_dtype(self): return None def get_state_magnitude(self): return None def get_initial_state(self): return", "general information to be saved on disk. \"\"\" return { 'name': self.name, 'actions':", "get_state_dtype(self): return None def get_state_magnitude(self): return None def get_initial_state(self): return None def step(self,", "actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self): \"\"\"", "to be saved on disk. \"\"\" return { 'name': self.name, 'actions': self.actions, 'gamma':", "def __init__(self, name, actions, gamma): self.name = name self.actions = actions self.gamma =", "False def get_info(self): \"\"\" Get general information to be saved on disk. \"\"\"", "Get general information to be saved on disk. \"\"\" return { 'name': self.name,", "information to be saved on disk. 
\"\"\" return { 'name': self.name, 'actions': self.actions,", "class Environment(object): def __init__(self, name, actions, gamma): self.name = name self.actions = actions", "return 0.0, None, False def get_info(self): \"\"\" Get general information to be saved", "= actions self.gamma = gamma def get_state_dimension(self): return None def get_state_dtype(self): return None", "self.actions = actions self.gamma = gamma def get_state_dimension(self): return None def get_state_dtype(self): return", "Abstract environment class \"\"\" class Environment(object): def __init__(self, name, actions, gamma): self.name =", "def get_state_magnitude(self): return None def get_initial_state(self): return None def step(self, s, a): \"\"\"", "state :param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False", ":return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self): \"\"\" Get", "gamma def get_state_dimension(self): return None def get_state_dtype(self): return None def get_state_magnitude(self): return None", "get_state_magnitude(self): return None def get_initial_state(self): return None def step(self, s, a): \"\"\" :param", ":param s: state :param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0,", "def get_info(self): \"\"\" Get general information to be saved on disk. \"\"\" return", "return None def get_state_magnitude(self): return None def get_initial_state(self): return None def step(self, s,", "None, False def get_info(self): \"\"\" Get general information to be saved on disk.", "def step(self, s, a): \"\"\" :param s: state :param a: actionSmall :return: r,", "be saved on disk. 
\"\"\" return { 'name': self.name, 'actions': self.actions, 'gamma': self.gamma", "s, a): \"\"\" :param s: state :param a: actionSmall :return: r, s_p, is_terminal(s_p)", ":param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def", "get_info(self): \"\"\" Get general information to be saved on disk. \"\"\" return {", "None def get_initial_state(self): return None def step(self, s, a): \"\"\" :param s: state", "actions self.gamma = gamma def get_state_dimension(self): return None def get_state_dtype(self): return None def", "return None def step(self, s, a): \"\"\" :param s: state :param a: actionSmall", "a): \"\"\" :param s: state :param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\"", "__init__(self, name, actions, gamma): self.name = name self.actions = actions self.gamma = gamma", "name self.actions = actions self.gamma = gamma def get_state_dimension(self): return None def get_state_dtype(self):", "return None def get_initial_state(self): return None def step(self, s, a): \"\"\" :param s:", "None def step(self, s, a): \"\"\" :param s: state :param a: actionSmall :return:", "r, s_p, is_terminal(s_p) \"\"\" return 0.0, None, False def get_info(self): \"\"\" Get general", "class \"\"\" class Environment(object): def __init__(self, name, actions, gamma): self.name = name self.actions", "= name self.actions = actions self.gamma = gamma def get_state_dimension(self): return None def", "environment class \"\"\" class Environment(object): def __init__(self, name, actions, gamma): self.name = name", "s: state :param a: actionSmall :return: r, s_p, is_terminal(s_p) \"\"\" return 0.0, None,", "step(self, s, a): \"\"\" :param s: state :param a: actionSmall :return: r, s_p,", "None def get_state_dtype(self): return None def get_state_magnitude(self): return None def get_initial_state(self): return None" ]
[ "AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed', 'viewset':", "common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex':", "{'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed', 'viewset': AnalyzedRestViewSet, 'basename': 'Analyzed'} ]", "common.api.views.base import RestViewSet from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset':", "<filename>backend/common/routes.py from common.api.views.base import RestViewSet from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex':", "[ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed', 'viewset': AnalyzedRestViewSet, 'basename': 'Analyzed'}", "RestViewSet from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename':", "import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed',", "from common.api.views.base import RestViewSet from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest',", "import RestViewSet from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet,", "from common.api.views.analyzed import AnalyzedRestViewSet routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'},", "= [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed', 'viewset': AnalyzedRestViewSet, 'basename':", "routes = [ {'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'}, {'regex': r'analyzed', 'viewset': AnalyzedRestViewSet," ]